repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/analyzer.rs
src/analyzer.rs
use serde::{Deserialize, Serialize}; use typed_builder::TypedBuilder; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum AnalyzerFeature { Frequency, Norm, Position, } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum AnalyzerCase { Lower, None, Upper, } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum NgramStreamType { Binary, Utf8, } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum GeoJsonType { Shape, Centroid, Point, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] pub struct DelimiterAnalyzerProperties { /// The value will be used as delimiter to split text into tokens as /// specified in RFC 4180, without starting new records on newlines. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub delimiter: Option<String>, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] pub struct StemAnalyzerProperties { /// Format: `language[_COUNTRY][.encoding][@variant]` pub locale: String, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] pub struct NormAnalyzerProperties { /// Format: `language[_COUNTRY][.encoding][@variant]` pub locale: String, /// Case conversion. Default: `"lower"` #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub case: Option<AnalyzerCase>, /// Preserve accents in returned words. Default: `false` #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub accent: Option<bool>, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct NgramAnalyzerProperties { /// Minimum n-gram length. 
pub min: u16, /// Maximum n-gram length. pub max: u16, /// Output the original value as well. pub preserve_original: bool, /// Type of the input stream. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub stream_type: Option<NgramStreamType>, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct TextAnalyzerProperties { /// Format: `language[_COUNTRY][.encoding][@variant]` pub locale: String, #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub case: Option<AnalyzerCase>, #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub accent: Option<bool>, /// Words to omit from result. /// Defaults to the words loaded from the file at `stopwordsPath`. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub stopwords: Option<Vec<String>>, /// Path with a `language` sub-directory containing files with words to /// omit. /// /// Defaults to the path specified in the server-side environment variable /// IRESEARCH_TEXT_STOPWORD_PATH` or the current working directory of the /// ArangoDB process. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub stopwords_path: Option<Vec<String>>, /// Apply stemming on returned words. 
/// Default: `true` #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub stemming: Option<bool>, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] pub struct GeoJsonAnalyzerProperties { /// Whether to index all GeoJSON geometry types, just the centroid, or just /// points #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub r#type: Option<GeoJsonType>, // Skip the options as they "generally should remain unchanged" } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct PipelineAnalyzerProperties { pub pipeline: Vec<PipelineAnalyzers>, } #[derive(Clone, Debug, Serialize, Deserialize, TypedBuilder, PartialEq)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct StopwordsAnalyzerProperties { #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub hex: Option<bool>, pub stopwords: Vec<String>, } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase", tag = "type")] pub enum AnalyzerInfo { /// The `identity` Analyzer does not take additional properties. 
Identity { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, }, Delimiter { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<DelimiterAnalyzerProperties>, }, Stem { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<StemAnalyzerProperties>, }, Norm { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<NormAnalyzerProperties>, }, Ngram { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<NgramAnalyzerProperties>, }, Text { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<TextAnalyzerProperties>, }, Geojson { name: String, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<GeoJsonAnalyzerProperties>, }, Stopwords { name: String, properties: StopwordsAnalyzerProperties, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, }, Pipeline { name: String, properties: PipelineAnalyzerProperties, }, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AnalyzerDescription { pub name: String, } //these are the exact same analyzer types , but customized to be used in a // pipeline analyzer since in pipeline analyzers `name` is not required for each // sub-analyzer, the name filed is deleted #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = 
"camelCase", tag = "type")] pub enum PipelineAnalyzers { /// The `identity` Analyzer does not take additional properties. Identity { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, }, Delimiter { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<DelimiterAnalyzerProperties>, }, Stem { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<StemAnalyzerProperties>, }, Norm { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<NormAnalyzerProperties>, }, Ngram { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<NgramAnalyzerProperties>, }, Text { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<TextAnalyzerProperties>, }, Geojson { #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<GeoJsonAnalyzerProperties>, }, Stopwords { properties: StopwordsAnalyzerProperties, #[serde(skip_serializing_if = "Option::is_none")] features: Option<Vec<AnalyzerFeature>>, }, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/response.rs
src/response.rs
//! types to deserialize responses from arangoDB server via HTTP request, as //! well as convenient functions to deserialize `Response`. //! //! For response with `error` and `code` fields indicating the whether the //! request is successful, use `deserialize_response` to abstract over request //! status and data of concerns. //! //! For response storing all information in `result` filed, use //! `ArangoResult`. use std::ops::Deref; use log::trace; use serde::{ de::{self, DeserializeOwned, Deserializer}, Deserialize, }; use serde_json::value::Value; use crate::{ArangoError, ClientError}; /// Deserialize response from arango server /// /// There are different type of json object when requests to arangoDB /// server is accepted or not. Here provides an abstraction for /// response of success and failure. /// /// When ArangoDB server response error code, then an error would be cast. pub(crate) fn deserialize_response<T>(text: &str) -> Result<T, ClientError> where T: DeserializeOwned, { let response: Response<T> = serde_json::from_str(text)?; Ok(Into::<Result<T, ArangoError>>::into(response)?) } /// An helper enum to divide into successful and failed response /// /// Request to server can failed at application level, like insufficient /// permission, database not found and etc. Response from arangoDB can tell /// whether the query succeeded and why if it failed. /// /// The function of this enum is almost the same as `Result`, except that it's /// used to deserialize from server response. This enum is to facilitate /// deserialization and it should be converted to `Result<T, ArangoError>` /// eventually. 
#[derive(Debug)] pub(crate) enum Response<T> { Ok(T), Err(ArangoError), } impl<T> From<Response<T>> for Result<T, ArangoError> { fn from(resp: Response<T>) -> Self { match resp { Response::Ok(success) => Ok(success), Response::Err(err) => Err(err), } } } impl<'de, T> Deserialize<'de> for Response<T> where T: Deserialize<'de>, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let map = serde_json::Map::deserialize(deserializer)?; trace!("Deserialize normal Response: {:?}", map); let error = map .get("error") .map_or_else(|| Ok(false), Deserialize::deserialize) .map_err(de::Error::custom)?; let rest = Value::Object(map); if error { ArangoError::deserialize(rest) .map(Response::Err) .map_err(de::Error::custom) } else { T::deserialize(rest) .map(Response::Ok) .map_err(de::Error::custom) } } } /// Helper struct to deserialize json result that store /// information in "result" field #[derive(Deserialize, Debug)] pub(crate) struct ArangoResult<T> { #[serde(rename = "result")] result: T, } impl<T> ArangoResult<T> { pub fn unwrap(self) -> T { self.result } } impl<T> Deref for ArangoResult<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.result } } #[cfg(test)] mod test { use super::*; #[derive(Debug, Deserialize)] pub struct CollectionResponse { pub id: String, pub name: String, pub status: u8, pub r#type: u8, #[serde(rename = "isSystem")] pub is_system: bool, } #[test] fn response() { let text = "{\"id\":\"9947\",\"name\":\"relation\",\"status\":2,\"type\":3,\"isSystem\": \ false,\"globallyUniqueId\":\"hD260BE2A30F9/9947\"}"; let result = serde_json::from_str::<Response<CollectionResponse>>(text); assert_eq!(result.is_ok(), true, "failed: {:?}", result); let text = "{\"error\":false,\"code\":412,\"id\":\"9947\",\"name\":\"relation\",\"status\"\ :2,\"type\":3,\"isSystem\": false,\"globallyUniqueId\":\"hD260BE2A30F9/9947\"}"; let result = serde_json::from_str::<Response<CollectionResponse>>(text); 
assert_eq!(result.is_ok(), true, "failed: {:?}", result); let text = "{\"error\":true,\"code\":412,\"errorMessage\":\"error\",\"errorNum\":1200}"; let result = serde_json::from_str::<Response<CollectionResponse>>(text); assert_eq!(result.is_ok(), true, "failed: {:?}", result); let response = Into::<Result<_, _>>::into(result.unwrap()); assert_eq!( response.is_err(), true, "response should be error: {:?}", response ); } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/view.rs
src/view.rs
use serde::{Deserialize, Serialize}; use std::collections::HashMap; use typed_builder::TypedBuilder; #[derive(Debug, Serialize, Deserialize, PartialEq)] pub enum ViewType { #[serde(rename = "arangosearch")] ArangoSearchView, } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum StoreValues { None, Id, } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum PrimarySortCompression { Lz4, None, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ViewDescription { /// A globally unique identifier for this View. pub globally_unique_id: String, /// An identifier for this View. pub id: String, /// Name of the View. pub name: String, /// Type of the View #[serde(rename = "type")] pub typ: ViewType, } #[derive(Debug, Serialize, Deserialize, TypedBuilder)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct ArangoSearchViewLink { /// A list of names of Analyzers to apply to values of processed document /// attributes. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub analyzers: Option<Vec<String>>, /// An object mapping names of attributes to process for each document to /// definitions. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub fields: Option<HashMap<String, ArangoSearchViewLink>>, /// If set to `true`, all document attributes will be processed, otherwise /// only the attributes in `fields` will be processed. /// Default: false #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub include_all_fields: Option<bool>, /// If set to `true`, the position of values in array values will be /// tracked, otherwise all values in an array will be treated as equal /// alternatives. 
#[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub track_list_positions: Option<bool>, /// Controls how the view should keep track of the attribute values. /// Default: `"none"` #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub store_values: Option<StoreValues>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "lowercase")] pub enum SortDirection { Asc, Desc, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case", tag = "type")] pub enum ConsolidationPolicy { #[serde(rename_all = "camelCase")] /// Must be in the range of `0.0` to `1.0`. BytesAccum { threshold: u32 }, #[serde(rename_all = "camelCase")] Tier { /// Minimum number of segments that will be evaluated as candidates /// for consolidation. /// Default: `1` #[serde(skip_serializing_if = "Option::is_none")] segments_min: Option<u32>, /// Maximum number of segments that will be evaluated as candidates /// for consolidation. /// Default: `10` #[serde(skip_serializing_if = "Option::is_none")] segments_max: Option<u32>, /// Maximum allowed size of all consolidated segments. /// Default: `5368709120`, i.e. 5 GiB #[serde(skip_serializing_if = "Option::is_none")] segments_bytes_max: Option<u64>, /// Defines the value to treat all smaller segments as equal for /// consolidation selection. /// Default: `2097152`, i.e. 2 MiB #[serde(skip_serializing_if = "Option::is_none")] segments_bytes_floor: Option<u32>, /// Minimum score. min_score: u32, }, } #[derive(Debug, Serialize, Deserialize, TypedBuilder, Clone)] #[serde(rename_all = "camelCase")] pub struct PrimarySort { /// Attribute path for the value of each document used for /// sorting. pub field: String, /// If set to `"asc"`, the primary sorting order is ascending. /// If set to `"desc"`, the primary sorting order is descending. 
#[builder(default, setter(strip_option))] direction: Option<SortDirection>, #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] asc: Option<bool>, } impl PrimarySort { pub fn direction(&self) -> Option<SortDirection> { if self.direction.is_none() { if let Some(asc) = self.asc { if asc { Some(SortDirection::Asc) } else { Some(SortDirection::Desc) } } else { None } } else { self.direction.clone() } } } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct StoredValues { pub fields: Vec<String>, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArangoSearchViewProperties { /// How many commits to wait between removing unused files. pub cleanup_interval_step: u32, /// How long to wait between applying the `consolidationPolicy`. pub consolidation_interval_msec: u32, /// Maximum number of writers cached in the pool. pub writebuffer_idle: u32, /// Maximum number of concurrent active writers that perform a transaction. pub writebuffer_active: u32, /// Maximum memory byte size per writer before a writer flush is triggered. pub writebuffer_size_max: u32, /// Consolidation policy to apply for selecting which segments should be /// merged pub consolidation_policy: ConsolidationPolicy, /// Attribute paths for which values should be stored in the view index /// in addition to those used for sorting via `primary_sort`. pub primary_sort: Option<Vec<PrimarySort>>, /// Compression to use for the primary sort data. /// Default: `"lz4"` pub primary_sort_compression: PrimarySortCompression, /// Attribute paths for which values should be stored in the view index /// in addition to those used for sorting via primary_sort. 
pub stored_values: Vec<StoredValues>, /// An object mapping names of linked collections to /// ArangoSearchViewLink pub links: HashMap<String, ArangoSearchViewLink>, } #[derive(Debug, Serialize, Deserialize, TypedBuilder)] #[serde(rename_all = "camelCase")] pub struct ArangoSearchViewPropertiesOptions { /// How many commits to wait between removing unused files. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] cleanup_interval_step: Option<u32>, /// How long to wait between applying the `consolidationPolicy`. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] consolidation_interval_msec: Option<u32>, /// Maximum number of writers cached in the pool. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] writebuffer_idle: Option<u32>, /// Maximum number of concurrent active writers that perform a transaction. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] writebuffer_active: Option<u32>, /// Maximum memory byte size per writer before a writer flush is triggered. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] writebuffer_size_max: Option<u32>, /// Consolidation policy to apply for selecting which segments should be /// merged #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] consolidation_policy: Option<ConsolidationPolicy>, /// Attribute paths for which values should be stored in the view index /// in addition to those used for sorting via `primary_sort`. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] primary_sort: Option<Vec<PrimarySort>>, /// Compression to use for the primary sort data. 
/// Default: `"lz4"` #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] primary_sort_compression: Option<PrimarySortCompression>, // Attribute paths for which values should be stored in the view index /// in addition to those used for sorting via primary_sort. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] stored_values: Option<Vec<StoredValues>>, /// An object mapping names of linked collections to /// ArangoSearchViewLink #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] links: Option<HashMap<String, ArangoSearchViewLink>>, } #[derive(Debug, Serialize, Deserialize, TypedBuilder)] #[serde(rename_all = "camelCase")] #[builder(doc)] pub struct ViewOptions { name: String, #[serde(rename = "type")] #[builder(default=ViewType::ArangoSearchView)] typ: ViewType, #[serde(flatten, skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] properties: Option<ArangoSearchViewPropertiesOptions>, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct View { #[serde(flatten)] pub description: ViewDescription, #[serde(flatten)] pub properties: ArangoSearchViewProperties, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/database.rs
src/database.rs
//! struct and enum pertain to arangoDB database //! //! AQL query are all executed in database level, so Database offers AQL query. use std::{collections::HashMap, fmt::Debug, sync::Arc}; use log::trace; use maybe_async::maybe_async; use serde::{de::DeserializeOwned, Deserialize}; use serde_json::value::Value; use url::Url; use crate::{ analyzer::{AnalyzerDescription, AnalyzerInfo}, aql::{AqlQuery, Cursor}, client::ClientExt, collection::{ options::{CreateOptions, CreateParameters}, response::{Info, Properties}, Collection, CollectionType, }, connection::Version, graph::{Graph, GraphCollection, GraphResponse, GHARIAL_API_PATH}, index::{DeleteIndexResponse, Index, IndexCollection, INDEX_API_PATH}, response::{deserialize_response, ArangoResult}, transaction::{ ArangoTransaction, Transaction, TransactionList, TransactionSettings, TransactionState, TRANSACTION_HEADER, }, user::{ access_level_enum_to_str, DeleteUserResponse, User, UserAccessLevel, UserDatabasesGetResponse, UserResponse, }, view::{ ArangoSearchViewProperties, ArangoSearchViewPropertiesOptions, View, ViewDescription, ViewOptions, }, ClientError, }; #[derive(Debug, Clone)] pub struct Database<C: ClientExt> { name: String, base_url: Url, session: Arc<C>, } impl<'a, C: ClientExt> Database<C> { pub(crate) fn new<T: Into<String>>(name: T, arango_url: &Url, session: Arc<C>) -> Database<C> { let name = name.into(); let path = format!("/_db/{}/", name.as_str()); let url = arango_url.join(path.as_str()).unwrap(); Database { name, session, base_url: url, } } /// Retrieve all collections of this database. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn accessible_collections(&self) -> Result<Vec<Info>, ClientError> { // an invalid arango_url should never running through initialization // so we assume arango_url is a valid url // When we pass an invalid path, it should panic to eliminate the bug // in development. 
let url = self.base_url.join("_api/collection").unwrap(); trace!( "Retrieving collections from {:?}: {}", self.name, url.as_str() ); let resp = self.session.get(url, "").await?; let result: ArangoResult<Vec<Info>> = deserialize_response(resp.body())?; trace!("Collections retrieved"); Ok(result.unwrap()) } pub fn url(&self) -> &Url { &self.base_url } pub fn name(&self) -> &str { &self.name } pub fn session(&self) -> Arc<C> { Arc::clone(&self.session) } /// Get collection object with name. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn collection(&self, name: &str) -> Result<Collection<C>, ClientError> { let url = self .base_url .join(&format!("_api/collection/{}", name)) .unwrap(); let resp: Info = deserialize_response(self.session.get(url, "").await?.body())?; Ok(Collection::from_response(self, &resp)) } /// Create a collection via HTTP request with options. /// /// Return a collection object if success. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn create_collection_with_options<'f>( &self, options: CreateOptions<'f>, parameters: CreateParameters, ) -> Result<Collection<C>, ClientError> { let mut url = self.base_url.join("_api/collection").unwrap(); let query = serde_qs::to_string(&parameters).unwrap(); url.set_query(Some(query.as_str())); let resp = self .session .post(url, &serde_json::to_string(&options)?) .await?; let result: Properties = deserialize_response(resp.body())?; self.collection(&result.info.name).await } /// Create a collection via HTTP request. /// /// Return a collection object if success. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn create_collection(&self, name: &str) -> Result<Collection<C>, ClientError> { self.create_collection_with_options( CreateOptions::builder().name(name).build(), Default::default(), ) .await } #[maybe_async] pub async fn create_edge_collection(&self, name: &str) -> Result<Collection<C>, ClientError> { self.create_collection_with_options( CreateOptions::builder() .name(name) .collection_type(CollectionType::Edge) .build(), Default::default(), ) .await } /// Drops a collection /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn drop_collection(&self, name: &str) -> Result<String, ClientError> { let url_path = format!("_api/collection/{}", name); let url = self.base_url.join(&url_path).unwrap(); #[derive(Debug, Deserialize)] struct DropCollectionResponse { id: String, } let resp: DropCollectionResponse = deserialize_response(self.session.delete(url, "").await?.body())?; Ok(resp.id) } /// Get the version remote arango database server /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn arango_version(&self) -> Result<Version, ClientError> { let url = self.base_url.join("_api/version").unwrap(); let resp = self.session.get(url, "").await?; let version: Version = serde_json::from_str(resp.body())?; Ok(version) } /// Get information of current database. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn info(&self) -> Result<DatabaseDetails, ClientError> { let url = self.base_url.join("_api/database/current").unwrap(); let resp = self.session.get(url, "").await?; let res: ArangoResult<DatabaseDetails> = deserialize_response(resp.body())?; Ok(res.unwrap()) } /// Execute aql query, return a cursor if succeed. The major advantage of /// batch query is that cursors contain more information and stats /// about the AQL query, and users can fetch results in batch to save memory /// resources on clients. 
/// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_query_batch<R>(&self, aql: AqlQuery<'_>) -> Result<Cursor<R>, ClientError> where R: DeserializeOwned, { let url = self.base_url.join("_api/cursor").unwrap(); let resp = self .session .post(url, &serde_json::to_string(&aql)?) .await?; deserialize_response(resp.body()) } /// Get next batch given the cursor id. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_next_batch<R>(&self, cursor_id: &str) -> Result<Cursor<R>, ClientError> where R: DeserializeOwned, { let url = self .base_url .join(&format!("_api/cursor/{}", cursor_id)) .unwrap(); let resp = self.session.put(url, "").await?; deserialize_response(resp.body()) } #[maybe_async] async fn aql_fetch_all<R>(&self, response: Cursor<R>) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let mut response_cursor = response; let mut results: Vec<R> = Vec::new(); loop { results.extend(response_cursor.result.into_iter()); if response_cursor.more { let id = response_cursor.id.unwrap().clone(); response_cursor = self.aql_next_batch(id.as_str()).await?; } else { break; } } Ok(results) } /// Execute AQL query fetch all results. /// /// DO NOT do this when the count of results is too large that network or /// memory resources cannot afford. /// /// DO NOT set a small batch size, otherwise clients will have to make many /// HTTP requests. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_query<R>(&self, aql: AqlQuery<'_>) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let response = self.aql_query_batch(aql).await?; if response.more { self.aql_fetch_all(response).await } else { Ok(response.result) } } /// Similar to `aql_query`, except that this method only accept a string of /// AQL query. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn aql_str<R>(&self, query: &str) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let aql = AqlQuery::builder().query(query).build(); self.aql_query(aql).await } /// Similar to `aql_query`, except that this method only accept a string of /// AQL query, with additional bind vars. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_bind_vars<R>( &self, query: &str, bind_vars: HashMap<&str, Value>, ) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let aql = AqlQuery::builder() .query(query) .bind_vars(bind_vars) .build(); self.aql_query(aql).await } /// Create a new index on a collection. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn create_index( &self, collection: &str, index: &Index, ) -> Result<Index, ClientError> { let mut url = self.base_url.join(INDEX_API_PATH).unwrap(); url.set_query(Some(&format!("collection={}", collection))); let resp = self .session .post(url, &serde_json::to_string(&index)?) .await?; let result: Index = deserialize_response::<Index>(resp.body())?; Ok(result) } /// Retrieve an index by id /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn index(&self, id: &str) -> Result<Index, ClientError> { let url = self .base_url .join(&format!("{}/{}", INDEX_API_PATH, id)) .unwrap(); let resp = self.session.get(url, "").await?; let result: Index = deserialize_response::<Index>(resp.body())?; Ok(result) } /// Retrieve a list of indexes for a collection. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn indexes(&self, collection: &str) -> Result<IndexCollection, ClientError> { let mut url = self.base_url.join(INDEX_API_PATH).unwrap(); url.set_query(Some(&format!("collection={}", collection))); let resp = self.session.get(url, "").await?; let result: IndexCollection = deserialize_response::<IndexCollection>(resp.body())?; Ok(result) } /// Delete an index by id. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn delete_index(&self, id: &str) -> Result<DeleteIndexResponse, ClientError> { let url = self .base_url .join(&format!("{}/{}", INDEX_API_PATH, id)) .unwrap(); let resp = self.session.delete(url, "").await?; let result: DeleteIndexResponse = deserialize_response::<DeleteIndexResponse>(resp.body())?; Ok(result) } /// Create a new graph in the graph module. /// /// # Arguments /// * `graph` - The graph object to create, its name must be unique. /// * `wait_for_sync` - define if the request should wait until everything /// is synced to disc. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn create_graph( &self, graph: Graph, wait_for_sync: bool, ) -> Result<Graph, ClientError> { let mut url = self.base_url.join(GHARIAL_API_PATH).unwrap(); url.set_query(Some(&format!("waitForSync={}", wait_for_sync))); let resp = self .session .post(url, &serde_json::to_string(&graph)?) .await?; let result: GraphResponse = deserialize_response::<GraphResponse>(resp.body())?; Ok(result.graph) } /// Retrieve an graph by name /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn graph(&self, name: &str) -> Result<Graph, ClientError> { let url = self .base_url .join(&format!("{}/{}", GHARIAL_API_PATH, name)) .unwrap(); let resp = self.session.get(url, "").await?; let result: GraphResponse = deserialize_response::<GraphResponse>(resp.body())?; Ok(result.graph) } /// Retrieve the list of created graphs. 
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn graphs(&self) -> Result<GraphCollection, ClientError> {
    // GET {base}/_api/gharial
    let url = self.base_url.join(GHARIAL_API_PATH).unwrap();
    let resp = self.session.get(url, "").await?;
    let result: GraphCollection = deserialize_response::<GraphCollection>(resp.body())?;
    Ok(result)
}

/// Drops an existing graph object by name. Optionally all collections not
/// used by other graphs can be dropped as well.
///
/// # Arguments
/// * `name` - The name of the graph to drop
/// * `drop_collections`- if set to `true`, drops collections of this graph
/// as well.
/// Collections will only be dropped if they are not used in other graphs.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn drop_graph(&self, name: &str, drop_collections: bool) -> Result<(), ClientError> {
    // DELETE {base}/_api/gharial/{name}?dropCollections={bool}
    let mut url = self
        .base_url
        .join(&format!("{}/{}", GHARIAL_API_PATH, name))
        .unwrap();
    url.set_query(Some(&format!("dropCollections={}", drop_collections)));
    // Response body is discarded; an error status surfaces through `?`.
    self.session.delete(url, "").await?;
    Ok(())
}

/// Return the currently running server-side transactions
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn list_transactions(&self) -> Result<Vec<TransactionState>, ClientError> {
    // GET {base}/_api/transaction
    let url = self.base_url.join("_api/transaction").unwrap();
    let resp = self.session.get(url, "").await?;
    let result: TransactionList = deserialize_response(resp.body())?;
    Ok(result.transactions)
}

/// Begin a server-side transaction, the transaction settings should specify
/// at least collections to be updated through the write list
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn begin_transaction(
    &self,
    transaction_settings: TransactionSettings,
) -> Result<Transaction<C>, ClientError> {
    // POST {base}/_api/transaction/begin with the settings as JSON body.
    let url = self.base_url.join("_api/transaction/begin").unwrap();
    let resp = self
        .session
        .post(url, &serde_json::to_string(&transaction_settings)?)
        .await?;
    let result: ArangoResult<ArangoTransaction> = deserialize_response(resp.body())?;
    let transaction = result.unwrap();
    let tx_id = transaction.id.clone();
    // Clone the session and tag it with the transaction-id header so that
    // every request made through the returned handle runs inside this
    // server-side transaction.
    let mut session = (*self.session).clone();
    session
        .headers()
        .insert(TRANSACTION_HEADER, tx_id.parse().unwrap());
    Ok(Transaction::<C>::new(
        transaction,
        Arc::new(session),
        self.base_url.clone(),
    ))
}

/// Returns an object containing a listing of all Views in a database,
/// regardless of their type
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn list_views(&self) -> Result<Vec<ViewDescription>, ClientError> {
    // GET {base}/_api/view
    let url = self.base_url.join("_api/view").unwrap();
    let resp = self.session.get(url, "").await?;
    let result: ArangoResult<Vec<ViewDescription>> = deserialize_response(resp.body())?;
    Ok(result.unwrap())
}

/// Creates an ArangoSearch View
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn create_view(&self, view_options: ViewOptions) -> Result<View, ClientError> {
    // POST {base}/_api/view with the view definition as JSON body.
    let url = self.base_url.join("_api/view").unwrap();
    let resp = self
        .session
        .post(url, &serde_json::to_string(&view_options)?)
        .await?;
    let result: View = deserialize_response(resp.body())?;
    Ok(result)
}

/// Return information about a View
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn view(&self, view_name: &str) -> Result<ViewDescription, ClientError> {
    // GET {base}/_api/view/{view_name}
    let url = self
        .base_url
        .join(&format!("_api/view/{}", view_name))
        .unwrap();
    let resp = self.session.get(url, "").await?;
    let result: ViewDescription = deserialize_response(resp.body())?;
    Ok(result)
}

/// Read properties of a View
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn view_properties(
    &self,
    view_name: &str,
) -> Result<ArangoSearchViewProperties, ClientError> {
    // GET {base}/_api/view/{view_name}/properties
    let url = self
        .base_url
        .join(&format!("_api/view/{}/properties", view_name))
        .unwrap();
    let resp = self.session.get(url, "").await?;
    let result: ArangoSearchViewProperties = deserialize_response(resp.body())?;
    Ok(result)
}

/// Changes all the properties of an ArangoSearch
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn replace_view_properties(
    &self,
    view_name: &str,
    properties: ArangoSearchViewPropertiesOptions,
) -> Result<View, ClientError> {
    // PUT replaces the full property set (vs. PATCH in `update_view_properties`).
    let url = self
        .base_url
        .join(&format!("_api/view/{}/properties", view_name))
        .unwrap();
    let resp = self
        .session
        .put(url, &serde_json::to_string(&properties)?)
        .await?;
    let result: View = deserialize_response(resp.body())?;
    Ok(result)
}

/// Partially changes properties of an ArangoSearch View
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn update_view_properties(
    &self,
    view_name: &str,
    properties: ArangoSearchViewPropertiesOptions,
) -> Result<View, ClientError> {
    // PATCH merges the given properties into the existing ones.
    let url = self
        .base_url
        .join(&format!("_api/view/{}/properties", view_name))
        .unwrap();
    let resp = self
        .session
        .patch(url, &serde_json::to_string(&properties)?)
        .await?;
    let result: View = deserialize_response(resp.body())?;
    Ok(result)
}

/// Drops the View identified by view-name.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn drop_view(&self, view_name: &str) -> Result<bool, ClientError> {
    // DELETE {base}/_api/view/{view_name}; the server answers with a wrapped bool.
    let endpoint = self
        .base_url
        .join(&format!("_api/view/{}", view_name))
        .unwrap();
    let response = self.session.delete(endpoint, "").await?;
    let payload: ArangoResult<bool> = deserialize_response(response.body())?;
    Ok(payload.unwrap())
}

/// List the Analyzers available in this database.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn list_analyzers(&self) -> Result<Vec<AnalyzerInfo>, ClientError> {
    // GET {base}/_api/analyzer
    let endpoint = self.base_url.join("_api/analyzer").unwrap();
    let response = self.session.get(endpoint, "").await?;
    let payload: ArangoResult<Vec<AnalyzerInfo>> = deserialize_response(response.body())?;
    Ok(payload.unwrap())
}

/// Create an Analyzer with the supplied definition
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn create_analyzer(
    &self,
    analyzer: AnalyzerInfo,
) -> Result<AnalyzerInfo, ClientError> {
    // POST {base}/_api/analyzer with the analyzer definition as JSON body.
    let body = serde_json::to_string(&analyzer)?;
    let endpoint = self.base_url.join("_api/analyzer").unwrap();
    let response = self.session.post(endpoint, &body).await?;
    let created: AnalyzerInfo = deserialize_response(response.body())?;
    Ok(created)
}

/// Return the Analyzer definition
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn analyzer(&self, analyzer_name: &str) -> Result<AnalyzerInfo, ClientError> {
    // GET {base}/_api/analyzer/{analyzer_name}
    let endpoint = self
        .base_url
        .join(&format!("_api/analyzer/{}", analyzer_name))
        .unwrap();
    let response = self.session.get(endpoint, "").await?;
    let info: AnalyzerInfo = deserialize_response(response.body())?;
    Ok(info)
}

/// Removes an Analyzer configuration identified by analyzer_name.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn drop_analyzer(
    &self,
    analyzer_name: &str,
) -> Result<AnalyzerDescription, ClientError> {
    // DELETE {base}/_api/analyzer/{analyzer_name}
    let url = self
        .base_url
        .join(&format!("_api/analyzer/{}", analyzer_name))
        .unwrap();
    let resp = self.session.delete(url, "").await?;
    let result: AnalyzerDescription = deserialize_response(resp.body())?;
    Ok(result)
}

/// List available users
///
/// Fetches data about all users. You need the Administrate server access
/// level in order to execute this REST call. Otherwise, you will only
/// get information about yourself.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn users(&self) -> Result<Vec<User>, ClientError> {
    // GET {base}/_api/user/
    // The path is a literal, so no `format!` is needed here.
    let url = self.base_url.join("_api/user/").unwrap();
    let resp = self.session.get(url, "").await?;
    let result: UserResponse = deserialize_response(resp.body())?;
    Ok(result.result)
}

/// Create User
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn create_user(&self, user: User) -> Result<User, ClientError> {
    // POST {base}/_api/user with the user record as JSON body.
    let url = self.base_url.join("_api/user").unwrap();
    let resp = self
        .session
        .post(url, &serde_json::to_string(&user)?)
        .await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Update User
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn update_user(&self, username: String, user: User) -> Result<User, ClientError> {
    // PUT {base}/_api/user/{username} with the replacement user data as JSON body.
    let url = self
        .base_url
        .join(&format!("_api/user/{}", username))
        .unwrap();
    let resp = self
        .session
        .put(url, &serde_json::to_string(&user)?)
        .await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Delete User
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn delete_user(&self, username: String) -> Result<(), ClientError> {
    // DELETE {base}/_api/user/{username}
    let url = self
        .base_url
        .join(&format!("_api/user/{}", username))
        .unwrap();
    let resp = self.session.delete(url, "").await?;
    // Deserialize only to surface server-side errors; the payload is discarded.
    let _: DeleteUserResponse = deserialize_response(resp.body())?;
    Ok(())
}

/// Get user-accessible databases
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn user_databases(
    &self,
    username: String,
    full: bool,
) -> Result<UserDatabasesGetResponse, ClientError> {
    // GET {base}/_api/user/{username}/database/?full={full}
    let url = self
        .base_url
        .join(&format!("_api/user/{username}/database/?full={full}"))
        .unwrap();
    let resp = self.session.get(url, "").await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Get the user's access level for the given database
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn user_db_access_level(
    &self,
    username: String,
    db_name: String,
) -> Result<UserDatabasesGetResponse, ClientError> {
    // GET {base}/_api/user/{username}/database/{db_name}
    let url = self
        .base_url
        .join(&format!("_api/user/{username}/database/{db_name}"))
        .unwrap();
    let resp = self.session.get(url, "").await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Set user's databases access level
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn user_db_access_put(
    &self,
    username: String,
    db_name: String,
    access_level: UserAccessLevel,
) -> Result<Value, ClientError> {
    // PUT {base}/_api/user/{username}/database/{db_name} with a
    // `{ "grant": "<level>" }` body built from the enum.
    let url = self
        .base_url
        .join(&format!("_api/user/{username}/database/{db_name}"))
        .unwrap();
    let resp = self
        .session
        .put(
            url,
            format!(
                "{{ \"grant\":\"{}\" }}",
                access_level_enum_to_str(access_level)
            ),
        )
        .await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Get the user's access level for a collection in a database
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn user_db_collection_access(
    &self,
    username: String,
    db_name: String,
    collection: String,
) -> Result<Value, ClientError> {
    // GET {base}/_api/user/{username}/database/{db_name}/{collection}
    let url = self
        .base_url
        .join(&format!(
            "_api/user/{username}/database/{db_name}/{collection}"
        ))
        .unwrap();
    let resp = self.session.get(url, "").await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}

/// Set user's collection access level
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async]
pub async fn user_db_collection_access_put(
    &self,
    username: String,
    db_name: String,
    collection: String,
    access_level: UserAccessLevel,
) -> Result<Value, ClientError> {
    // PUT {base}/_api/user/{username}/database/{db_name}/{collection} with a
    // `{ "grant": "<level>" }` body built from the enum.
    let url = self
        .base_url
        .join(&format!(
            "_api/user/{username}/database/{db_name}/{collection}"
        ))
        .unwrap();
    let resp = self
        .session
        .put(
            url,
            format!(
                "{{ \"grant\":\"{}\" }}",
                access_level_enum_to_str(access_level)
            ),
        )
        .await?;
    let result = deserialize_response(resp.body())?;
    Ok(result)
}
}

/// Details about a database as reported by the server.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DatabaseDetails {
    // Database name.
    pub name: String,
    // Server-assigned database id.
    pub id: String,
    // Filesystem path of the database on the server.
    pub path: String,
    // Whether this is the `_system` database.
    pub is_system: bool,
}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/error.rs
src/error.rs
use std::fmt;

use serde::Deserialize;
use thiserror::Error;

use crate::connection::Permission;

/// Top-level error type returned by this crate's client operations.
#[derive(Error, Debug)]
pub enum ClientError {
    /// The current credentials lack the permission needed for `operation`.
    #[error("Insufficient permission ({permission:?}) to operate: {operation}")]
    InsufficientPermission {
        permission: Permission,
        operation: String,
    },
    /// The endpoint did not identify itself as an ArangoDB server.
    #[error("Server is not ArangoDB: {0}")]
    InvalidServer(String),
    /// An error payload returned by the ArangoDB server itself.
    #[error("Error from server: {0}")]
    Arango(#[from] ArangoError),
    /// JSON (de)serialization failure.
    #[error("Error from serde: {0}")]
    Serde(#[from] serde_json::error::Error),
    /// Transport-level failure reported by the underlying HTTP client.
    #[error("HTTP client error: {0}")]
    HttpClient(String),
}

/// Error body as serialized by ArangoDB (`code`, `errorNum`, `errorMessage`).
#[derive(Deserialize, Debug, Error)]
pub struct ArangoError {
    // HTTP status code of the error response.
    pub(crate) code: u16,
    // ArangoDB-specific error number.
    #[serde(rename = "errorNum")]
    pub(crate) error_num: u16,
    // Human-readable message from the server.
    #[serde(rename = "errorMessage")]
    pub(crate) message: String,
}

impl fmt::Display for ArangoError {
    // Rendered as "<message>(<error_num>)".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}({})", self.message, self.error_num)
    }
}

impl ArangoError {
    /// Get the HTTP status code of an error response.
    pub fn code(&self) -> u16 {
        self.code
    }

    /// Get the ArangoDB-specific error number.
    pub fn error_num(&self) -> u16 {
        self.error_num
    }

    /// Get the server-provided error message.
    pub fn message(&self) -> &str {
        &self.message
    }
}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/aql.rs
src/aql.rs
//! Types related to AQL query in arangoDB.
//!
//! While aql queries are performed on database, it would be ponderous to
//! place all aql query related methods and types in `arangors::database`.
//!
//! Steps to perform a AQL query:
//! 1. (optional) construct a AqlQuery object.
//!     - (optional) construct AqlOption.
//! 1. perform AQL query via `database.aql_query`.
use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use serde_json::value::Value;
use typed_builder::TypedBuilder;

#[derive(Debug, Serialize, TypedBuilder)]
#[builder(
    doc,
    builder_method(doc = r#"Create a builder for building `AqlQuery`. On the builder, call `.query(...)`, `.bind_vars(...)(optional)`, `.bind_var(...)(optional)`, `.try_bind(...)(optional)`, `.count(...)(optional)`, `.batch_size(...)(optional)`, `.cache(...)(optional)`, `.memory_limit(...)(optional)`, `.ttl(...)(optional)`, `.options(...)(optional)` to set the values of the fields (they accept Into values). Use `.try_bind(...)` to accept any serializable struct while `.bind_value(...)` accepts an `Into<serde_json::Value>`. Finally, call .build() to create the instance of AqlQuery."#)
)]
#[serde(rename_all = "camelCase")]
pub struct AqlQuery<'a> {
    /// query string to be executed
    query: &'a str,

    /// bind parameters to substitute in query string
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    #[builder(default)]
    bind_vars: HashMap<&'a str, Value>,

    /// Indicates whether the number of documents in the result set should be
    /// returned in the "count" attribute of the result.
    ///
    /// Calculating the 'count' attribute might have a performance impact
    /// for some queries in the future so this option is turned off by default,
    /// and 'count' is only returned when requested.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    count: Option<bool>,

    /// Maximum number of result documents to be transferred from the server to
    /// the client in one round-trip.
    ///
    /// If this attribute is not set, a server-controlled default value will
    /// be used.
    ///
    /// A batchSize value of 0 is disallowed.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    batch_size: Option<u32>,

    /// A flag to determine whether the AQL query cache shall be used.
    ///
    /// If set to false, then any query cache lookup will be skipped for the
    /// query. If set to true, it will lead to the query cache being
    /// checked for the query if the query cache mode is either on or
    /// demand.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    cache: Option<bool>,

    /// The maximum number of memory (measured in bytes) that the query is
    /// allowed to use.
    ///
    /// If set, then the query will fail with error 'resource
    /// limit exceeded' in case it allocates too much memory.
    ///
    /// A value of 0 indicates that there is no memory limit.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    memory_limit: Option<u64>,

    /// The time-to-live for the cursor (in seconds).
    ///
    /// The cursor will be removed on the server automatically after
    /// the specified amount of time. This is useful to ensure garbage
    /// collection of cursors that are not fully fetched by clients.
    ///
    /// If not set, a server-defined value will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    ttl: Option<u32>,

    /// Options
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    options: Option<AqlOptions>,
}

// Hand-written extension of the TypedBuilder state machine: this impl applies
// while the `bind_vars` field is still unset (its state slot is `()`), so the
// first `bind_var`/`try_bind` call creates the map and transitions the builder
// into the "bind_vars set" state handled by the next impl.
// when binding the first query variable
#[allow(non_camel_case_types, missing_docs)]
impl<'a, __query, __count, __batch_size, __cache, __memory_limit, __ttl, __options>
    AqlQueryBuilder<
        'a,
        (
            __query,
            (),
            __count,
            __batch_size,
            __cache,
            __memory_limit,
            __ttl,
            __options,
        ),
    >
{
    #[allow(clippy::type_complexity)]
    pub fn bind_var<K, V>(
        self,
        key: K,
        value: V,
    ) -> AqlQueryBuilder<
        'a,
        (
            __query,
            (HashMap<&'a str, Value>,),
            __count,
            __batch_size,
            __cache,
            __memory_limit,
            __ttl,
            __options,
        ),
    >
    where
        K: Into<&'a str>,
        V: Into<Value>,
    {
        // Seed a fresh map with the first binding and rebuild the builder in
        // the "bind_vars set" state, carrying every other field through.
        let mut bind_vars = HashMap::new();
        bind_vars.insert(key.into(), value.into());

        let (query, _, count, batch_size, cache, memory_limit, ttl, options) = self.fields;
        AqlQueryBuilder {
            fields: (
                query,
                (bind_vars,),
                count,
                batch_size,
                cache,
                memory_limit,
                ttl,
                options,
            ),
            phantom: self.phantom,
        }
    }

    #[allow(clippy::type_complexity)]
    pub fn try_bind<K, V>(
        self,
        key: K,
        value: V,
    ) -> Result<
        AqlQueryBuilder<
            'a,
            (
                __query,
                (HashMap<&'a str, Value>,),
                __count,
                __batch_size,
                __cache,
                __memory_limit,
                __ttl,
                __options,
            ),
        >,
        serde_json::Error,
    >
    where
        K: Into<&'a str>,
        V: serde::Serialize,
    {
        // Serialize any `Serialize` value into a `Value`, then delegate.
        Ok(self.bind_var(key, serde_json::to_value(value)?))
    }
}

// when bind_var(s) are not empty
#[allow(non_camel_case_types, missing_docs)]
impl<'a, __query, __count, __batch_size, __cache, __memory_limit, __ttl, __options>
    AqlQueryBuilder<
        'a,
        (
            __query,
            (HashMap<&'a str, Value>,),
            __count,
            __batch_size,
            __cache,
            __memory_limit,
            __ttl,
            __options,
        ),
    >
{
    #[allow(clippy::type_complexity)]
    pub fn bind_var<K, V>(
        mut self,
        key: K,
        value: V,
    ) -> AqlQueryBuilder<
        'a,
        (
            __query,
            (HashMap<&'a str, Value>,),
            __count,
            __batch_size,
            __cache,
            __memory_limit,
            __ttl,
            __options,
        ),
    >
    where
        K: Into<&'a str>,
        V: Into<Value>,
    {
        // The map already exists inside the 1-tuple state slot; insert in place.
        (self.fields.1).0.insert(key.into(), value.into());
        self
    }

    #[allow(clippy::type_complexity)]
    pub fn try_bind<K, V>(
        self,
        key: K,
        value: V,
    ) -> Result<
        AqlQueryBuilder<
            'a,
            (
                __query,
                (HashMap<&'a str, Value>,),
                __count,
                __batch_size,
                __cache,
                __memory_limit,
                __ttl,
                __options,
            ),
        >,
        serde_json::Error,
    >
    where
        K: Into<&'a str>,
        V: serde::Serialize,
    {
        Ok(self.bind_var(key, serde_json::to_value(value)?))
    }
}

#[derive(Debug, Serialize, TypedBuilder, PartialEq)]
#[builder(doc)]
#[serde(rename_all = "camelCase")]
pub struct AqlOptions {
    /// When set to true, the query will throw an exception and abort instead of
    /// producing a warning.
    ///
    /// This option should be used during development to catch potential issues
    /// early.
    ///
    /// When the attribute is set to false, warnings will not be propagated to
    /// exceptions and will be returned with the query result.
    /// There is also a server configuration option `--query.fail-on-warning`
    /// for setting the default value for `fail_on_warning` so it does not
    /// need to be set on a per-query level.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    fail_on_warning: Option<bool>,

    /// If set to true, then the additional query profiling information will
    /// be returned in the sub-attribute profile of the extra return attribute
    /// if the query result is not served from the query cache.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    profile: Option<bool>,

    /// Limits the maximum number of warnings a query will return.
    ///
    /// The number of warnings a query will return is limited to 10 by default,
    /// but that number can be increased or decreased by setting this attribute.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    max_warning_count: Option<u32>,

    /// If set to true and the query contains a LIMIT clause, then the result
    /// will have an extra attribute with the sub-attributes stats and
    /// fullCount, `{ ... , "extra": { "stats": { "fullCount": 123 } } }`.
    ///
    /// The fullCount attribute will contain the number of documents in the
    /// result before the last LIMIT in the query was applied. It can be
    /// used to count the number of documents that match certain filter
    /// criteria, but only return a subset of them, in one go. It is thus
    /// similar to MySQL's `SQL_CALC_FOUND_ROWS` hint. Note that setting
    /// the option will disable a few LIMIT optimizations and may lead to
    /// more documents being processed, and thus make queries run longer.
    /// Note that the fullCount attribute
    /// will only be present in the result if the query has a LIMIT clause
    /// and the LIMIT clause is actually used in the query.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    full_count: Option<bool>,

    /// Limits the maximum number of plans that are created by the AQL query
    /// optimizer.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    max_plans: Option<u32>,

    /// A list string indicating to-be-included or to-be-excluded optimizer
    /// rules can be put into this attribute, telling the optimizer to
    /// include or exclude specific rules.
    ///
    /// To disable a rule, prefix its name with a `-`.
    ///
    /// To enable a rule, prefix it with a `+`.
    ///
    /// There is also a pseudo-rule `"all"`, which will match all optimizer
    /// rules.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    #[builder(default)]
    optimizer: Vec<String>,

    /// Maximum number of operations after which an intermediate commit is
    /// performed automatically.
    ///
    /// Honored by the RocksDB storage engine only.
    #[cfg(feature = "rocksdb")]
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    intermediate_commit_count: Option<u32>,

    /// Maximum total size of operations after which an intermediate commit is
    /// performed automatically.
    ///
    /// Honored by the RocksDB storage engine only.
    #[cfg(feature = "rocksdb")]
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    intermediate_commit_size: Option<u32>,

    /// Transaction size limit in bytes.
    ///
    /// Honored by the RocksDB storage engine only.
    #[cfg(feature = "rocksdb")]
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    max_transaction_size: Option<u32>,

    /// This enterprise parameter allows to configure how long a DBServer will
    /// have time to bring the satellite collections involved in the query into
    /// sync.
    ///
    /// The default value is 60.0 (seconds). When the max time has been
    /// reached the query will be stopped.
    #[cfg(feature = "enterprise")]
    #[serde(skip_serializing_if = "Option::is_none")]
    #[builder(default, setter(strip_option))]
    satellite_sync_wait: Option<bool>,
}

impl Default for AqlOptions {
    // All fields have builder defaults, so an "empty" options object is valid.
    fn default() -> AqlOptions {
        Self::builder().build()
    }
}

impl AqlOptions {
    /// Append one optimizer rule (e.g. `"+all"` or `"-rule-name"`).
    pub fn set_optimizer(&mut self, optimizer: String) {
        self.optimizer.push(optimizer)
    }
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct QueryStats {
    /// The total number of data-modification operations successfully executed.
    ///
    /// This is equivalent to the number of documents created, updated or
    /// removed by `INSERT`, `UPDATE`, `REPLACE` or `REMOVE` operations.
    pub writes_executed: usize,
    /// Total number of data-modification operations that were unsuccessful,
    /// but have been ignored because of query option ignoreErrors.
    pub writes_ignored: usize,
    /// Total number of documents iterated over when scanning a collection
    /// without an index.
    ///
    /// Documents scanned by subqueries will be included in the result, but
    /// not operations triggered by built-in or user-defined AQL functions.
    pub scanned_full: usize,
    /// Total number of documents iterated over when scanning a collection
    /// using an index.
    ///
    /// Documents scanned by subqueries will be included in the result, but
    /// not operations triggered by built-in or user-defined AQL functions.
    pub scanned_index: usize,
    /// Total number of documents that were removed after executing a filter
    /// condition in a FilterNode.
    ///
    /// Note that IndexRangeNodes can also filter documents by selecting only
    /// the required index range from a collection, and the filtered value
    /// only indicates how much filtering was done by FilterNodes.
    pub filtered: usize,
    /// Total number of documents that matched the search condition if the
    /// query's final LIMIT statement were not present.
    ///
    /// This attribute will only be returned if the fullCount option was set
    /// when starting the query and will only contain a sensible value if the
    /// query contained a LIMIT operation on the top level.
    pub full_count: Option<usize>,
    // Number of HTTP requests the server issued while executing the query.
    pub http_requests: usize,
    // Query execution time on the server, in seconds.
    pub execution_time: f64,
}

#[derive(Deserialize, Debug)]
pub struct Cursor<T> {
    /// the total number of result documents available
    ///
    /// only available if the query was executed with the count attribute
    /// set
    pub count: Option<usize>,
    /// a boolean flag indicating whether the query result was served from
    /// the query cache or not.
    ///
    /// If the query result is served from the query cache, the extra
    /// return attribute will not contain any stats sub-attribute
    /// and no profile sub-attribute.,
    pub cached: bool,
    /// A boolean indicator whether there are more results available for
    /// the cursor on the server
    #[serde(rename = "hasMore")]
    pub more: bool,
    /// (anonymous json object): an array of result documents (might be
    /// empty if query has no results)
    pub result: Vec<T>,
    /// id of temporary cursor created on the server
    pub id: Option<String>,
    /// an optional JSON object with extra information about the query
    /// result contained in its stats sub-attribute. For
    /// data-modification queries, the extra.stats sub-attribute
    /// will contain the number of
    /// modified documents and the number of documents that could
    /// not be modified due to an error if ignoreErrors query
    /// option is specified.
    pub extra: Option<QueryExtra>,
}

#[derive(Deserialize, Debug)]
pub struct QueryExtra {
    // TODO
    pub stats: Option<QueryStats>,
    // TODO
    pub warnings: Option<Vec<Value>>,
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn aql_query_builder_bind_var() {
        let q = r#"FOR i in test_collection FILTER i.username==@username AND i.password==@password return i"#;
        let aql = AqlQuery::builder()
            .query(q)
            // test the first bind
            .bind_var("username", "test2")
            // test the second bind
            .bind_var("password", "test2_pwd")
            .count(true)
            .batch_size(256)
            .cache(false)
            .memory_limit(100)
            .ttl(10)
            .build();

        assert_eq!(aql.query, q);
        assert_eq!(aql.count, Some(true));
        assert_eq!(aql.batch_size, Some(256u32));
        assert_eq!(aql.cache, Some(false));
        assert_eq!(aql.memory_limit, Some(100));
        assert_eq!(aql.ttl, Some(10));
        assert_eq!(aql.options, None);
        assert_eq!(
            aql.bind_vars.get("username"),
            Some(&Value::String("test2".to_owned()))
        );
        assert_eq!(
            aql.bind_vars.get("password"),
            Some(&Value::String("test2_pwd".to_owned()))
        );
    }

    #[test]
    fn aql_query_builder_try_bind() {
        #[derive(Serialize, Deserialize, Debug)]
        struct User {
            pub username: String,
            pub password: String,
        }
        let user = User {
            username: "test2".to_owned(),
            password: "test2_pwd".to_owned(),
        };

        let q = r#"FOR i in test_collection FILTER i==@user return i"#;
        let aql = AqlQuery::builder()
            .query(q)
            .try_bind("user", user)
            .unwrap()
            .build();

        assert_eq!(aql.query, q);
        assert_eq!(aql.count, None);
        assert_eq!(aql.batch_size, None);

        let mut map = serde_json::Map::new();
        map.insert("username".into(), "test2".into());
        map.insert("password".into(), "test2_pwd".into());
        assert_eq!(aql.bind_vars.get("user"), Some(&Value::Object(map)));

        let aql = AqlQuery::builder()
            .query(r#"FOR i in test_collection FILTER i.username==@username AND i.password==@password return i"#)
            // test the first bind
            .try_bind("username", "test2")
            .unwrap()
            // test the second bind
            .try_bind("password", "test2_pwd")
            .unwrap()
            .build();
        assert_eq!(
            aql.bind_vars.get("username"),
            Some(&Value::String("test2".to_owned()))
        );
        assert_eq!(
            aql.bind_vars.get("password"),
            Some(&Value::String("test2_pwd".to_owned()))
        );
    }
}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/query.rs
src/query.rs
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/graph.rs
src/graph.rs
//! This module facilitates the building of new named graphs as well as the
//! retrieval of existing indexes in ArangoDB.
//!
//! The various structures are following the HTTP specification as detailed in
//! this ArangoDB [section](https://www.arangodb.com/docs/stable/http/gharial-management.html)
//!
//! For detailed information about ArangoDB named graphs, please check out the
//! official ArangoDB [documentation](https://www.arangodb.com/docs/stable/http/gharial.html).
use serde::{Deserialize, Serialize};
use typed_builder::TypedBuilder;

pub(crate) const GHARIAL_API_PATH: &str = "_api/gharial";

/// Represents a Named Graph in ArangoDB.
#[derive(Debug, Clone, Serialize, Deserialize, Default, TypedBuilder)]
#[serde(rename_all = "camelCase")]
pub struct Graph {
    /// Name of the graph
    #[builder(default)]
    pub name: String,
    /// An array of definitions for the relations of the graph.
    #[builder(default)]
    pub edge_definitions: Vec<EdgeDefinition>,
    /// An array of additional vertex collections. Documents within these
    /// collections do not have edges within this graph.
    #[builder(default)]
    #[serde(skip_serializing_if = "Vec::is_empty", default = "Vec::new")]
    pub orphan_collections: Vec<String>,
    /// Define if the created graph should be smart (Enterprise Edition only).
    #[builder(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub is_smart: Option<bool>,
    /// Whether to create a Disjoint SmartGraph instead of a regular SmartGraph
    /// (Enterprise Edition only).
    #[builder(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub is_disjoint: Option<bool>,
    /// a JSON object to define options for creating collections within this
    /// graph.
    #[builder(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<GraphOptions>,
}

/// Represents the available options for a [`Graph`] Creation
///
/// [`Graph`]: struct.Graph.html
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GraphOptions {
    /// Only has effect in Enterprise Edition and it is required if isSmart is
    /// true. The attribute name that is used to smartly shard the vertices
    /// of a graph. Every vertex in this SmartGraph has to have this
    /// attribute. Cannot be modified later.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub smart_graph_attribute: Option<String>,
    /// The number of shards that is used for every collection within this
    /// graph. Cannot be modified later.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub number_of_shards: Option<u32>,
    /// The replication factor used when initially creating collections for this
    /// graph. Can be set to "satellite" to create a SatelliteGraph, which
    /// will ignore numberOfShards, minReplicationFactor and writeConcern
    /// (Enterprise Edition only).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub replication_factor: Option<u32>,
    /// Write concern for new collections in the graph.
    /// It determines how many copies of each shard are required to be in sync
    /// on the different DB-Servers. If there are fewer than this many
    /// copies in the cluster a shard will refuse to write. Writes to shards
    /// with enough up-to-date copies will succeed at the same time however.
    /// The value of writeConcern can not be larger than replicationFactor.
    /// (cluster only)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub write_concern: Option<u32>,
}

/// Represents one Edge definition for a [`Graph`] Creation.
///
/// [`Graph`]: struct.Graph.html
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeDefinition {
    /// Name of the edge collection
    pub collection: String,
    /// List of the `_from` collection names
    pub from: Vec<String>,
    /// List of the `_to` collection names
    pub to: Vec<String>,
}

/// Represents a collection of [`Graphs`] on a database in ArangoDB.
///
/// [`Graphs`]: struct.Graph.html
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GraphCollection {
    pub graphs: Vec<Graph>,
}

/// Represents a [`Graph`] as returned by ArangoDB after a HTTP retrieval
///
/// [`Graph`]: struct.Graph.html
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GraphResponse {
    pub graph: Graph,
}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/transaction.rs
src/transaction.rs
use maybe_async::maybe_async; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; use std::{collections::HashMap, sync::Arc}; use typed_builder::TypedBuilder; use url::Url; use crate::{ aql::Cursor, client::ClientExt, collection::response::Info, response::{deserialize_response, ArangoResult}, AqlQuery, ClientError, Collection, }; pub const TRANSACTION_HEADER: &str = "x-arango-trx-id"; #[derive(Debug, Serialize, Deserialize, TypedBuilder)] #[builder(doc)] pub struct TransactionCollections { #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] read: Option<Vec<String>>, write: Vec<String>, } #[derive(Debug, Serialize, Deserialize, TypedBuilder)] #[serde(rename_all = "camelCase")] #[builder(doc)] pub struct TransactionSettings { collections: TransactionCollections, #[builder(default, setter(strip_option))] #[serde(skip_serializing_if = "Option::is_none")] wait_for_sync: Option<bool>, #[builder(default = true)] allow_implicit: bool, #[builder(default, setter(strip_option))] #[serde(skip_serializing_if = "Option::is_none")] lock_timeout: Option<usize>, #[builder(default, setter(strip_option))] #[serde(skip_serializing_if = "Option::is_none")] max_transaction_size: Option<usize>, } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum Status { Running, Committed, Aborted, } #[derive(Debug, Serialize, Deserialize)] pub struct ArangoTransaction { pub id: String, pub status: Status, } #[derive(Debug, Serialize, Deserialize)] pub struct TransactionState { pub id: String, pub state: Status, } #[derive(Debug, Serialize, Deserialize)] pub struct TransactionList { pub transactions: Vec<TransactionState>, } /// Represents a [`Transaction`] in ArangoDB. /// allow you to perform a multi-document transaction with individual begin and /// commit / abort commands. This is similar to the way traditional RDBMS do it /// with BEGIN, COMMIT and ROLLBACK operations. 
# Example /// ``` /// # use arangors::Connection; /// # use arangors::Document; /// # use arangors::transaction::{TransactionCollections, TransactionSettings}; /// # use serde_json::{json, Value}; /// /// # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)] /// # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)] /// # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)] /// # async fn main() -> Result<(),anyhow::Error>{ /// # let conn = Connection::establish_jwt("http://localhost:8529", "username", "password") /// # .await /// # .unwrap(); /// let database = conn.db("test_db").await.unwrap(); /// /// let tx = database.begin_transaction( /// TransactionSettings::builder() /// .lock_timeout(60000) /// .wait_for_sync(true) /// .collections( /// TransactionCollections::builder() /// .write(vec!["test_collection".to_owned()]) /// .build(), /// ) /// .build(), /// ).await.unwrap(); /// /// let test_doc: Document<Value> = Document::new(json!({ /// "user_name":"test21", /// "user_name":"test21_pwd", /// })); /// /// let collection = tx.collection("test_collection").await.unwrap(); /// let document = collection /// .create_document(test_doc, Default::default()) /// .await?; /// let header = document.header().unwrap(); /// let _key = &header._key; /// /// tx.abort().await.unwrap(); /// # Ok(()) /// # } /// ``` #[derive(Debug)] pub struct Transaction<C: ClientExt> { id: String, status: Status, session: Arc<C>, base_url: Url, } impl<C> Transaction<C> where C: ClientExt, { pub(crate) fn new(tx: ArangoTransaction, session: Arc<C>, base_url: Url) -> Self { Transaction { id: tx.id, status: tx.status, session, base_url, } } /// Returns the current transaction status (running, aborted or comitted) pub fn status(&self) -> &Status { &self.status } /// Returns the transaction id pub fn id(&self) -> &String { &self.id } pub fn url(&self) -> &Url { &self.base_url } /// The transaction session, contains the streaming 
transaction header value pub fn session(&self) -> Arc<C> { Arc::clone(&self.session) } /// Tries to commit the transaction, consuming the current object. /// /// On success all submitted operations will be written in the database and /// can no longer be aborted. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn commit_transaction(self) -> Result<Status, ClientError> { let url = self .base_url .join(&format!("_api/transaction/{}", self.id)) .unwrap(); let resp = self.session.put(url, "").await?; let result: ArangoResult<ArangoTransaction> = deserialize_response(resp.body())?; Ok(result.unwrap().status) } /// Tries to commit the transaction. /// /// On success all submitted operations will be written in the database and /// can no longer be aborted. A transaction can be committed multiple /// times. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn commit(&self) -> Result<Status, ClientError> { let url = self .base_url .join(&format!("_api/transaction/{}", self.id)) .unwrap(); let resp = self.session.put(url, "").await?; let result: ArangoResult<ArangoTransaction> = deserialize_response(resp.body())?; Ok(result.unwrap().status) } /// Tries to abort the transaction. /// /// On success all submitted operations will be cancelled and can no longer /// be committed. A ransaction can be aborted multiple times without /// error. /// /// # Warning /// /// If the transaction is aborted, then it means deletion on the server /// side. The current object can no longer be used for operations or /// commit. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn abort(&self) -> Result<Status, ClientError> { let url = self .base_url .join(&format!("_api/transaction/{}", self.id)) .unwrap(); let resp = self.session.delete(url, "").await?; let result: ArangoResult<ArangoTransaction> = deserialize_response(resp.body())?; Ok(result.unwrap().status) } /// Get collection object with name. /// /// The returned collection object will share its session with the /// transaction, meaning all operations using the colleciton will be /// transactional and require a transaction commit to be writen /// in ArangoDB. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn collection(&self, name: &str) -> Result<Collection<C>, ClientError> { let url = self .base_url .join(&format!("_api/collection/{}", name)) .unwrap(); let resp: Info = deserialize_response(self.session.get(url, "").await?.body())?; Ok(Collection::from_transaction_response(self, &resp)) } #[maybe_async] pub async fn aql_query_batch<R>(&self, aql: AqlQuery<'_>) -> Result<Cursor<R>, ClientError> where R: DeserializeOwned, { let url = self.base_url.join("_api/cursor").unwrap(); let resp = self .session .post(url, &serde_json::to_string(&aql)?) 
.await?; deserialize_response(resp.body()) } #[maybe_async] pub async fn aql_next_batch<R>(&self, cursor_id: &str) -> Result<Cursor<R>, ClientError> where R: DeserializeOwned, { let url = self .base_url .join(&format!("_api/cursor/{}", cursor_id)) .unwrap(); let resp = self.session.put(url, "").await?; deserialize_response(resp.body()) } #[maybe_async] async fn aql_fetch_all<R>(&self, response: Cursor<R>) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let mut response_cursor = response; let mut results: Vec<R> = Vec::new(); loop { if response_cursor.more { let id = response_cursor.id.unwrap().clone(); results.extend(response_cursor.result.into_iter()); response_cursor = self.aql_next_batch(id.as_str()).await?; } else { break; } } Ok(results) } /// Execute AQL query fetch all results. /// /// DO NOT do this when the count of results is too large that network or /// memory resources cannot afford. /// /// DO NOT set a small batch size, otherwise clients will have to make many /// HTTP requests. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_query<R>(&self, aql: AqlQuery<'_>) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let response = self.aql_query_batch(aql).await?; if response.more { self.aql_fetch_all(response).await } else { Ok(response.result) } } /// Similar to `aql_query`, except that this method only accept a string of /// AQL query. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn aql_str<R>(&self, query: &str) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let aql = AqlQuery::builder().query(query).build(); self.aql_query(aql).await } /// Similar to `aql_query`, except that this method only accept a string of /// AQL query, with additional bind vars. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn aql_bind_vars<R>( &self, query: &str, bind_vars: HashMap<&str, Value>, ) -> Result<Vec<R>, ClientError> where R: DeserializeOwned, { let aql = AqlQuery::builder() .query(query) .bind_vars(bind_vars) .build(); self.aql_query(aql).await } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/connection/auth.rs
src/connection/auth.rs
//! Type definitions for various authentication methods. /// According to aragndb document, supported auth methods are /// - basicAuth /// - JWT /// - no auth /// /// And this enum provides an abstraction to these methods. /// /// Auth is then used when initialize `Connection`. /// /// # Example /// ```rust, ignore /// use arangors::connection::Auth; /// /// let basic_auth = Auth::basic("username", "password"); /// let jwt_auth = Auth::jwt("username", "password"); /// let no_auth = Auth::None; /// let no_auth = Auth::default(); /// ``` #[derive(Debug, Clone, Default)] pub(crate) enum Auth<'a> { /// Basic auth Basic(Credential<'a>), /// JSON Web Token (JWT) auth Jwt(Credential<'a>), /// no auth #[default] None, } impl<'a> Auth<'a> { pub fn basic(username: &'a str, password: &'a str) -> Auth<'a> { Auth::Basic(Credential { username, password }) } pub fn jwt(username: &'a str, password: &'a str) -> Auth<'a> { Auth::Jwt(Credential { username, password }) } } /// Username and password holder for authentication #[derive(Debug, Clone, Hash)] pub(crate) struct Credential<'a> { /// username pub username: &'a str, /// password pub password: &'a str, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/connection/options.rs
src/connection/options.rs
use serde::{Deserialize, Serialize}; use typed_builder::TypedBuilder; #[cfg(feature = "cluster")] use std::collections::HashMap; /// Options for create a collection #[derive(Serialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] #[cfg(feature = "cluster")] pub struct CreateDatabaseOptions { /// The sharding method to use for new collections in this database. /// Valid values are: “”, “flexible”, or “single”. The first two are /// equivalent #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] sharding: Option<String>, /// (The default is 1): in a cluster, this attribute determines how many /// copies of each shard are kept on different DB-Servers. The value 1 means /// that only one copy (no synchronous replication) is kept. A value of k /// means that k-1 replicas are kept. It can also be the string "satellite" /// for a SatelliteCollection, where the replication factor is matched to /// the number of DB-Servers. /// /// Any two copies reside on different DB-Servers. Replication between them /// is synchronous, that is, every write operation to the “leader” copy will /// be replicated to all “follower” replicas, before the write operation is /// reported successful. /// /// If a server fails, this is detected automatically and one of the servers /// holding copies take over, usually without an error being reported. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] replication_factor: Option<usize>, /// Write concern for this collection (default: 1). /// /// It determines how many copies of each shard are required to be in sync /// on the different DB-Servers. If there are less then these many copies in /// the cluster a shard will refuse to write. Writes to shards with enough /// up-to-date copies will succeed at the same time however. The value of /// writeConcern can not be larger than replicationFactor. 
(cluster only) #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] write_concern: Option<usize>, } #[derive(Serialize, PartialEq, TypedBuilder)] #[serde(rename_all = "camelCase")] pub(crate) struct CreateDatabase<'a> { name: &'a str, #[cfg(feature = "cluster")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] options: Option<CreateDatabaseOptions>, } #[derive(Serialize, PartialEq, Deserialize)] pub enum ClusterRole { Coordinator, DBServer, Agent, } #[derive(Serialize, PartialEq, Deserialize)] #[serde(rename_all = "lowercase")] pub enum Engine { RocksDB, MMFiles, } #[derive(Serialize, PartialEq, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub enum ClusterStatus { Good, Bad, Failed, } #[derive(Serialize, PartialEq, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub enum SyncStatus { Serving, Startup, Stopping, Stopped, Shutdown, Undefined, Unknown, } #[derive(Serialize, Deserialize, PartialEq)] #[serde(rename_all = "PascalCase")] #[cfg(feature = "cluster")] pub struct ServerHealth { pub endpoint: String, pub role: ClusterRole, pub status: ClusterStatus, pub engine: Engine, pub version: String, pub leader: Option<String>, pub sync_status: Option<SyncStatus>, } #[derive(Serialize, Deserialize, PartialEq)] #[serde(rename_all = "PascalCase")] #[cfg(feature = "cluster")] pub struct ClusterHealth { pub cluster_id: String, pub health: HashMap<String, ServerHealth>, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/connection/mod.rs
src/connection/mod.rs
//! Top level connection object that hold a http client (either synchronous or //! asynchronous), arango URL, and buffered accessible databases object. //! //! ## Establishing connections //! There is three way to establish connections: //! - jwt //! - basic auth //! - no authentication //! //! So are the `arangors` API: //! Example: //! //! - With authentication //! //! ```rust //! use arangors::Connection; //! //! # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)] //! # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)] //! # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)] //! # async fn main() { //! let conn = Connection::establish_jwt("http://localhost:8529", "username", "password") //! .await //! .unwrap(); //! let conn = Connection::establish_basic_auth("http://localhost:8529", "username", "password") //! .await //! .unwrap(); //! # } //! ``` //! //! - No authentication //! ```rust, ignore //! use arangors::Connection; //! let conn = Connection::establish_without_auth("http://localhost:8529").await.unwrap(); //! 
``` use std::{collections::HashMap, fmt::Debug, sync::Arc}; use base64::{engine::general_purpose, Engine as _}; use http::header::{HeaderMap, AUTHORIZATION, SERVER}; use log::{debug, trace}; use maybe_async::maybe_async; use serde::{Deserialize, Serialize}; use serde_json::Value; use url::Url; use crate::{client::ClientExt, response::ArangoResult, ClientError}; use super::{database::Database, response::deserialize_response}; #[cfg(feature = "cluster")] use self::options::{ClusterHealth, CreateDatabase, CreateDatabaseOptions}; use self::{ auth::Auth, role::{Admin, Normal}, }; mod auth; pub mod options; pub mod role { #[derive(Debug)] pub struct Normal; #[derive(Debug)] pub struct Admin; } #[derive(Deserialize, Serialize, Debug)] pub enum Permission { #[serde(rename = "none")] NoAccess, #[serde(rename = "ro")] ReadOnly, #[serde(rename = "rw")] ReadWrite, } #[derive(Debug, Deserialize)] pub struct Version { pub server: String, pub version: String, pub license: String, } #[cfg(any(feature = "reqwest_async", feature = "reqwest_blocking"))] pub type Connection = GenericConnection<crate::client::reqwest::ReqwestClient>; #[cfg(feature = "surf_async")] pub type Connection = GenericConnection<crate::client::surf::SurfClient>; /// Connection is the top level API for this crate. /// It contains a http client, information about authentication, arangodb url. 
#[derive(Debug, Clone)] pub struct GenericConnection<C: ClientExt, S = Normal> { session: Arc<C>, arango_url: Url, username: String, #[allow(dead_code)] state: S, } impl<S, C: ClientExt> GenericConnection<C, S> { /// Validate the server at given arango url /// /// Cast `ClientError` if /// - Invalid url /// - Connection failed /// - SERVER header in response header is not `ArangoDB` or empty #[maybe_async] pub async fn validate_server(arango_url: &str) -> Result<(), ClientError> { let client = C::new(None)?; let resp = client.get(arango_url.parse().unwrap(), "").await?; // have `Server` in header match resp.headers().get(SERVER) { Some(server) => { // value of `Server` is `ArangoDB` let server_value = server.to_str().unwrap(); if server_value.eq_ignore_ascii_case("ArangoDB") { trace!("Validate arangoDB server done."); Ok(()) } else if server_value.eq_ignore_ascii_case("arangodb-oasis") { trace!("Validate arangoDB Oasis server done."); Ok(()) } else { Err(ClientError::InvalidServer(server_value.to_owned())) } } None => Err(ClientError::InvalidServer("Unknown".to_owned())), } } /// Get url for remote arangoDB server. pub fn url(&self) -> &Url { &self.arango_url } /// Get HTTP session. /// /// Users can use this method to get a authorized session to access /// arbitrary path on arangoDB Server. /// /// TODO This method should only be public in this crate when all features /// are implemented. pub fn session(&self) -> Arc<C> { Arc::clone(&self.session) } /// Get database object with name. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn db(&self, name: &str) -> Result<Database<C>, ClientError> { let db = Database::new(name, self.url(), self.session()); db.info().await?; Ok(db) } /// Get a list of accessible database /// /// This function uses the API that is used to retrieve a list of /// all databases the current user can access. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn accessible_databases(&self) -> Result<HashMap<String, Permission>, ClientError> { let url = self .arango_url .join(&format!("/_api/user/{}/database", &self.username)) .unwrap(); let resp = self.session.get(url, "").await?; let result: ArangoResult<HashMap<String, Permission>> = deserialize_response(resp.body())?; Ok(result.unwrap()) } // Returns the role of a server in a cluster. The role is returned in the role // attribute of the result /// /// Possible return values for role are: /// SINGLE: the server is a standalone server without clustering /// COORDINATOR: the server is a Coordinator in a cluster /// PRIMARY: the server is a DB-Server in a cluster /// SECONDARY: this role is not used anymore /// AGENT: the server is an Agency node in a cluster /// UNDEFINED: in a cluster, UNDEFINED is returned if the server role cannot /// be determined. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn server_role(&self) -> Result<String, ClientError> { let url = self.arango_url.join("/_admin/server/role").unwrap(); let resp = self.session.get(url, "").await?; let result: HashMap<String, Value> = deserialize_response(resp.body())?; Ok(result.get("role").unwrap().as_str().unwrap().to_owned()) } /// Returns the health of the cluster as assessed by the supervision /// (Agency) /// /// # Note /// this function would make a request to arango server. #[maybe_async] #[cfg(feature = "cluster")] pub async fn cluster_health(&self) -> Result<ClusterHealth, ClientError> { let url = self.arango_url.join("/_admin/cluster/health").unwrap(); let resp = self.session.get(url, "").await?; let result: ClusterHealth = deserialize_response(resp.body())?; Ok(result) } } impl<C: ClientExt> GenericConnection<C, Normal> { /// Establish connection to ArangoDB sever with Auth. /// /// The connection is establish in the following steps: /// 1. validate if it is a arangoDB server at the given base url /// 1. 
set authentication in header /// 1. build a http client that holds authentication tokens /// 1. construct databases objects for later use /// /// The most secure way to connect to a arangoDB server is via JWT /// token authentication, along with TLS encryption. #[maybe_async] async fn establish<T: Into<String>>( arango_url: T, auth: Auth<'_>, ) -> Result<GenericConnection<C, Normal>, ClientError> { let url_str = arango_url.into(); let arango_url = Url::parse(&url_str) .map_err(|_| ClientError::InvalidServer(format!("invalid url: {}", url_str)))? .join("/") .unwrap(); Self::validate_server(&url_str).await?; let username: String; let authorization = match auth { Auth::Basic(cred) => { username = String::from(cred.username); let token = general_purpose::STANDARD_NO_PAD .encode(format!("{}:{}", cred.username, cred.password)); Some(format!("Basic {}", token)) } Auth::Jwt(cred) => { username = String::from(cred.username); let token = Self::jwt_login(&arango_url, cred.username, cred.password).await?; Some(format!("Bearer {}", token)) } Auth::None => { username = String::from("root"); None } }; let mut headers = HeaderMap::new(); if let Some(value) = authorization { headers.insert(AUTHORIZATION, value.parse().unwrap()); } debug!("Established"); Ok(GenericConnection { arango_url, username, session: Arc::new(C::new(headers)?), state: Normal, }) } /// Establish connection to ArangoDB sever without Authentication. /// /// The target server **MUST DISABLE** authentication for all requests, /// which should only used for **test purpose**. /// /// Disable authentication means all operations are performed by root user. 
/// /// Example: /// ```rust, ignore /// use arangors::Connection; /// /// let conn = Connection::establish_without_auth("http://localhost:8529").await.unwrap(); /// ``` #[maybe_async] pub async fn establish_without_auth<T: Into<String>>( arango_url: T, ) -> Result<GenericConnection<C, Normal>, ClientError> { trace!("Establish without auth"); GenericConnection::establish(arango_url.into(), Auth::None).await } /// Establish connection to ArangoDB sever with basic auth. /// /// Example: /// ```rust /// use arangors::Connection; /// /// # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)] /// # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)] /// # #[cfg_attr(feature="blocking", maybe_async::must_be_sync)] /// # async fn main() { /// let conn = Connection::establish_basic_auth("http://localhost:8529", "username", "password") /// .await /// .unwrap(); /// # } /// ``` #[maybe_async] pub async fn establish_basic_auth( arango_url: &str, username: &str, password: &str, ) -> Result<GenericConnection<C, Normal>, ClientError> { trace!("Establish with basic auth"); GenericConnection::establish(arango_url, Auth::basic(username, password)).await } /// Establish connection to ArangoDB sever with jwt authentication. /// /// Prefered way to interact with arangoDB server. /// /// JWT token expires after 1 month. 
/// /// Example: /// /// ```rust /// use arangors::Connection; /// /// # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)] /// # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)] /// # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)] /// # async fn main() { /// let conn = Connection::establish_jwt("http://localhost:8529", "username", "password") /// .await /// .unwrap(); /// # } /// ``` #[maybe_async] pub async fn establish_jwt( arango_url: &str, username: &str, password: &str, ) -> Result<GenericConnection<C, Normal>, ClientError> { trace!("Establish with jwt"); GenericConnection::establish(arango_url, Auth::jwt(username, password)).await } #[maybe_async] async fn jwt_login<T: Into<String>>( arango_url: &Url, username: T, password: T, ) -> Result<String, ClientError> { #[derive(Deserialize)] struct Jwt { pub jwt: String, } let url = arango_url.join("/_open/auth").unwrap(); let mut map = HashMap::new(); map.insert("username", username.into()); map.insert("password", password.into()); let jwt: Jwt = deserialize_response( C::new(None)? .post(url, &serde_json::to_string(&map)?) .await? .body(), )?; Ok(jwt.jwt) } /// Create a database via HTTP request and add it into `self.databases`. /// /// If creation fails, an Error is cast. Otherwise, a bool is returned to /// indicate whether the database is correctly created. 
/// /// # Example /// ```rust /// use arangors::Connection; /// # #[cfg_attr(any(feature="reqwest_async"), maybe_async::maybe_async, tokio::main)] /// # #[cfg_attr(any(feature="surf_async"), maybe_async::maybe_async, async_std::main)] /// # #[cfg_attr(feature = "blocking", maybe_async::must_be_sync)] /// # async fn main() { /// let conn = Connection::establish_jwt("http://localhost:8529", "root", "KWNngteTps7XjrNv") /// .await /// .unwrap(); /// let result = conn.create_database("new_db").await.unwrap(); /// println!("{:?}", result); /// /// let result = conn.drop_database("new_db").await.unwrap(); /// println!("{:?}", result); /// # } /// ``` /// TODO tweak options on creating database /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn create_database(&self, name: &str) -> Result<Database<C>, ClientError> { let mut map = HashMap::new(); map.insert("name", name); let url = self.arango_url.join("/_api/database").unwrap(); let resp = self .session .post(url, &serde_json::to_string(&map)?) .await?; deserialize_response::<ArangoResult<bool>>(resp.body())?; self.db(name).await } #[maybe_async] #[cfg(feature = "cluster")] pub async fn create_database_with_options( &self, name: &str, options: CreateDatabaseOptions, ) -> Result<Database<C>, ClientError> { let url = self.arango_url.join("/_api/database").unwrap(); let final_options = CreateDatabase::builder() .name(name) .options(options) .build(); let resp = self .session .post(url, &serde_json::to_string(&final_options)?) .await?; deserialize_response::<ArangoResult<bool>>(resp.body())?; self.db(name).await } /// Drop database with name. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn drop_database(&self, name: &str) -> Result<(), ClientError> { let url_path = format!("/_api/database/{}", name); let url = self.arango_url.join(&url_path).unwrap(); let resp = self.session.delete(url, "").await?; deserialize_response::<ArangoResult<bool>>(resp.body())?; Ok(()) } #[maybe_async] pub async fn into_admin(self) -> Result<GenericConnection<C, Admin>, ClientError> { let dbs = self.accessible_databases().await?; let db = dbs .get("_system") .ok_or(ClientError::InsufficientPermission { permission: Permission::NoAccess, operation: String::from("access to _system database"), })?; match db { Permission::ReadWrite => Ok(self.into()), _ => Err(ClientError::InsufficientPermission { permission: Permission::ReadOnly, operation: String::from("write to _system database"), }), } } } impl<C: ClientExt> GenericConnection<C, Admin> { pub fn into_normal(self) -> GenericConnection<C, Normal> { self.into() } } impl<C: ClientExt> From<GenericConnection<C, Normal>> for GenericConnection<C, Admin> { fn from(conn: GenericConnection<C, Normal>) -> GenericConnection<C, Admin> { GenericConnection { arango_url: conn.arango_url, session: conn.session, username: conn.username, state: Admin, } } } impl<C: ClientExt> From<GenericConnection<C, Admin>> for GenericConnection<C, Normal> { fn from(conn: GenericConnection<C, Admin>) -> GenericConnection<C, Normal> { GenericConnection { arango_url: conn.arango_url, session: conn.session, username: conn.username, state: Normal, } } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/collection/response.rs
src/collection/response.rs
//! Types of response related to collection use crate::collection::{options::KeyOptions, CollectionType}; use serde::{ de::{Deserializer, Error as DeError}, Deserialize, }; #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Info { pub count: Option<u32>, pub id: String, pub name: String, pub globally_unique_id: String, pub is_system: bool, pub status: Status, #[serde(rename = "type")] pub collection_type: CollectionType, } #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum Status { NewBorn = 1, Unloaded = 2, Loaded = 3, Unloading = 4, Deleted = 5, Loading = 6, } impl<'de> Deserialize<'de> for Status { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let value = u8::deserialize(deserializer)?; match value { 1 => Ok(Status::NewBorn), 2 => Ok(Status::Unloaded), 3 => Ok(Status::Loaded), 4 => Ok(Status::Unloading), 5 => Ok(Status::Deleted), 6 => Ok(Status::Loading), _ => Err(DeError::custom( "Undefined behavior. If the crate breaks after an upgrade of ArangoDB, please \ contact the author.", )), } } } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Properties { #[serde(flatten)] pub info: Info, #[serde(flatten)] pub detail: Details, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Details { pub status_string: String, pub key_options: KeyOptions, pub wait_for_sync: bool, pub write_concern: u16, #[cfg(feature = "rocksdb")] pub cache_enabled: bool, #[cfg(feature = "rocksdb")] pub object_id: String, #[cfg(feature = "mmfiles")] pub is_volatile: bool, #[cfg(feature = "mmfiles")] pub do_compact: bool, #[cfg(feature = "mmfiles")] pub journal_size: usize, #[cfg(feature = "mmfiles")] pub index_buckets: usize, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArangoIndex { pub count: Option<u32>, pub size: Option<u32>, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Figures { pub indexes: 
ArangoIndex, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Statistics { /// The number of documents currently present in the collection. pub count: Option<u32>, /// metrics of the collection pub figures: Figures, #[serde(flatten)] pub info: Info, #[serde(flatten)] pub detail: Details, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Revision { // pub uses_revisions_as_document_ids: Option<bool>, // pub sync_by_revision: bool, // pub min_revision: u32, // These 3 properties are for Arangodb 3.7 pub revision: String, #[serde(flatten)] pub info: Info, #[serde(flatten)] pub detail: Details, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Checksum { pub revision: String, pub checksum: String, #[serde(flatten)] pub info: Info, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/collection/options.rs
src/collection/options.rs
//! Types of response related to collection use serde::{Deserialize, Serialize, Serializer}; use typed_builder::TypedBuilder; use crate::collection::CollectionType; /// Options for create a collection #[derive(Serialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct CreateParameters { /// Default is 1 which means the server will only report success back to the /// client if all replicas have created the collection. Set to 0 if you want /// faster server responses and don’t care about full replication. #[serde(skip_serializing_if = "Option::is_none", serialize_with = "bool2int")] #[builder(default, setter(strip_option))] wait_for_sync_replication: Option<bool>, /// Default is 1 which means the server will check if there are enough /// replicas available at creation time and bail out otherwise. Set to 0 to /// disable this extra check. #[serde(skip_serializing_if = "Option::is_none", serialize_with = "bool2int")] #[builder(default, setter(strip_option))] enforce_replication_factor: Option<bool>, } impl Default for CreateParameters { fn default() -> Self { Self::builder().build() } } fn bool2int<S>(v: &Option<bool>, ser: S) -> Result<S::Ok, S::Error> where S: Serializer, { if v.is_none() || *v.as_ref().unwrap() { ser.serialize_i8(1) } else { ser.serialize_i8(0) } } /// Options for create a collection #[derive(Serialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct CreateOptions<'a> { name: &'a str, /// the type of the collection to create #[serde(rename = "type", skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] collection_type: Option<CollectionType>, /// If true then the data is synchronized to disk before returning from a /// document create, update, replace or removal operation. 
(default: false) #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /// isSystem: If true, create a system collection. In this case /// collection-name should start with an underscore. End users should /// normally create non-system collections only. API implementors may be /// required to create system collections in very special occasions, but /// normally a regular collection will do. (The default is false) #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] is_system: Option<bool>, /// additional options for key generation #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] key_options: Option<KeyOptions>, /// Optional object that specifies the collection level schema for /// documents. The attribute keys rule, level and message must follow the /// rules documented in Document Schema Validation <https://www.arangodb.com/docs/devel/document-schema-validation.html> #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] schema: Option<serde_json::Value>, /// This attribute specifies the name of the sharding strategy to use for /// the collection. Since ArangoDB 3.4 there are different sharding /// strategies to select from when creating a new collection. The selected /// shardingStrategy value will remain fixed for the collection and cannot /// be changed afterwards. This is important to make the collection keep its /// sharding settings and always find documents already distributed to /// shards using the same initial sharding algorithm. 
The available /// sharding strategies are: /// /// - community-compat: default sharding used by ArangoDB Community Edition /// before version 3.4 /// - enterprise-compat: default sharding used by ArangoDB Enterprise /// Edition before version 3.4 /// - enterprise-smart-edge-compat: default sharding used by smart edge /// collections in ArangoDB Enterprise Edition before version 3.4 /// - hash: default sharding used for new collections starting from version /// 3.4 (excluding smart edge collections) /// - enterprise-hash-smart-edge: default sharding used for new smart edge /// collections starting from version 3.4 /// /// If no sharding strategy is specified, the default will be hash for all /// collections, and enterprise-hash-smart-edge for all smart edge /// collections (requires the Enterprise Edition of ArangoDB). Manually /// overriding the sharding strategy does not yet provide a benefit, but it /// may later in case other sharding strategies are added. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] sharding_strategy: Option<String>, /// whether or not the collection will be compacted (default is true) This /// option is meaningful for the MMFiles storage engine only. #[cfg(feature = "mmfiles")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] do_compat: Option<bool>, /// The maximal size of a journal or datafile in bytes. The value must be at /// least 1048576 (1 MiB). (The default is a configuration parameter) This /// option is meaningful for the MMFiles storage engine only. #[cfg(feature = "mmfiles")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] journal_size: Option<usize>, /// If true then the collection data is kept in-memory only and not made /// persistent. Unloading the collection will cause the collection data to /// be discarded. 
Stopping or re-starting the server will also cause full /// loss of data in the collection. Setting this option will make the /// resulting collection be slightly faster than regular collections because /// ArangoDB does not enforce any synchronization to disk and does not /// calculate any CRC checksums for datafiles (as there are no datafiles). /// This option should therefore be used for cache-type collections only, /// and not for data that cannot be re-created otherwise. (The default is /// false) This option is meaningful for the MMFiles storage engine only. #[cfg(feature = "mmfiles")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] is_volatile: Option<bool>, /// (The default is 1): in a cluster, this value determines the number of /// shards to create for the collection. In a single server setup, this /// option is meaningless. #[cfg(feature = "cluster")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] number_of_shards: Option<usize>, /// (The default is [ “_key” ]): in a cluster, this attribute determines /// which document attributes are used to determine the target shard for /// documents. Documents are sent to shards based on the values of their /// shard key attributes. The values of all shard key attributes in a /// document are hashed, and the hash value is used to determine the target /// shard. Note: Values of shard key attributes cannot be changed once set. /// This option is meaningless in a single server setup. #[cfg(feature = "cluster")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] shard_keys: Option<Vec<String>>, /// (The default is 1): in a cluster, this attribute determines how many /// copies of each shard are kept on different DB-Servers. The value 1 means /// that only one copy (no synchronous replication) is kept. A value of k /// means that k-1 replicas are kept. 
It can also be the string "satellite" /// for a SatelliteCollection, where the replication factor is matched to /// the number of DB-Servers. /// /// Any two copies reside on different DB-Servers. Replication between them /// is synchronous, that is, every write operation to the “leader” copy will /// be replicated to all “follower” replicas, before the write operation is /// reported successful. /// /// If a server fails, this is detected automatically and one of the servers /// holding copies take over, usually without an error being reported. #[cfg(feature = "cluster")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] replication_factor: Option<usize>, /// Write concern for this collection (default: 1). /// /// It determines how many copies of each shard are required to be in sync /// on the different DB-Servers. If there are less then these many copies in /// the cluster a shard will refuse to write. Writes to shards with enough /// up-to-date copies will succeed at the same time however. The value of /// writeConcern can not be larger than replicationFactor. (cluster only) #[cfg(feature = "cluster")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] write_concern: Option<usize>, /// (The default is ”“): in an Enterprise Edition cluster, this attribute /// binds the specifics of sharding for the newly created collection to /// follow that of a specified existing collection. Note: Using this /// parameter has consequences for the prototype collection. It can no /// longer be dropped, before the sharding-imitating collections are /// dropped. Equally, backups and restores of imitating collections alone /// will generate warnings (which can be overridden) about missing sharding /// prototype. 
#[cfg(feature = "enterprise")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] distribute_shards_like: Option<String>, /// In an Enterprise Edition cluster, this attribute determines an attribute /// of the collection that must contain the shard key value of the /// referred-to SmartJoin collection. Additionally, the shard key for a /// document in this collection must contain the value of this attribute, /// followed by a colon, followed by the actual primary key of the document. /// /// This feature can only be used in the Enterprise Edition and requires the /// distributeShardsLike attribute of the collection to be set to the name /// of another collection. It also requires the shardKeys attribute of the /// collection to be set to a single shard key attribute, with an additional /// ‘:’ at the end. A further restriction is that whenever documents are /// stored or updated in the collection, the value stored in the /// smartJoinAttribute must be a string. #[cfg(feature = "enterprise")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] smart_join_attribute: Option<String>, } fn is_true(x: &bool) -> bool { *x } #[derive(Debug, Deserialize, Serialize, TypedBuilder, PartialEq, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct KeyOptions { /// if set to true, then it is allowed to supply own key values in the _key /// attribute of a document. If set to false, then the key generator will /// solely be responsible for generating keys and supplying own key values /// in the _key attribute of documents is considered an error. #[serde(skip_serializing_if = "is_true")] #[builder(default = true)] pub allow_user_keys: bool, /// specifies the type of the key generator. The currently available /// generators are traditional and autoincrement. 
#[serde(skip_serializing_if = "Option::is_none", rename = "type")] #[builder(default, setter(strip_option))] pub key_type: Option<String>, /// increment value for autoincrement key generator. Not used for other key /// generator types. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub increment: Option<u32>, /// Initial offset value for autoincrement key generator. Not used for other /// key generator types. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] pub offset: Option<u32>, #[serde(skip_serializing)] #[builder(setter(skip), default = None)] pub last_value: Option<u32>, } impl Default for KeyOptions { fn default() -> Self { Self::builder().build() } } /// Options for checksum #[derive(Serialize, Deserialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct ChecksumOptions { /// By setting the optional query parameter withRevisions to true, then /// revision ids (_rev system attributes) are included in the /// checksumming. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] with_revision: Option<bool>, /// By providing the optional query parameter withData with a value of true, /// the user-defined document attributes will be included in the /// calculation too. /// /// Note: Including user-defined attributes will make /// the checksumming slower. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] with_data: Option<bool>, } impl Default for ChecksumOptions { fn default() -> Self { Self::builder().build() } } #[derive(Debug, Deserialize, Serialize, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct PropertiesOptions { /// If true then creating or changing a document will wait until the data /// has been synchronized to disk. 
#[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /* TODO need to implement this with feature gate between versions maybe * for ArangoDB 3.7 * schema: Option<SchemaRules>, */ } impl Default for PropertiesOptions { fn default() -> Self { Self::builder().build() } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/collection/mod.rs
src/collection/mod.rs
//! Collection level operations //! //! This mod contains struct and type of colleciton info and management, as well //! as document related operations. use std::{convert::TryFrom, sync::Arc}; use http::Request; use maybe_async::maybe_async; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::json; use url::Url; use options::*; use response::*; use crate::{ client::ClientExt, document::{ options::{InsertOptions, ReadOptions, RemoveOptions, ReplaceOptions, UpdateOptions}, response::DocumentResponse, Header, }, response::{deserialize_response, ArangoResult}, transaction::Transaction, ClientError, }; use super::{Database, Document}; use crate::transaction::TRANSACTION_HEADER; pub mod options; pub mod response; /// Represent a collection in Arango server that consists of documents/edges. /// /// It is uniquely identified by its /// collection identifier. It also has a unique name that clients should use to /// identify and access it. Collections can be renamed. This will change the /// collection name, but not the collection identifier. /// /// Collections have a type /// that is specified by the user when the collection is created. There are /// currently two types: document and edge. The default type is document. 
#[derive(Debug, Clone)] pub struct Collection<C: ClientExt> { id: String, name: String, collection_type: CollectionType, base_url: Url, document_base_url: Url, session: Arc<C>, } impl<'a, C: ClientExt> Collection<C> { /// Construct Collection given collection info from server /// /// Base url should be like `http://server:port/_db/mydb/_api/collection/{collection-name}` /// Document root should be like: http://server:port/_db/mydb/_api/document/ pub(crate) fn new<T: Into<String>, S: Into<String>>( name: T, id: S, collection_type: CollectionType, db_url: &Url, session: Arc<C>, ) -> Collection<C> { let name = name.into(); let path = format!("_api/collection/{}/", &name); let url = db_url.join(&path).unwrap(); let document_path = format!("_api/document/{}/", &name); let document_base_url = db_url.join(&document_path).unwrap(); Collection { name, id: id.into(), session, base_url: url, document_base_url, collection_type, } } pub(crate) fn from_response(database: &Database<C>, collection: &Info) -> Collection<C> { Self::new( &collection.name, &collection.id, collection.collection_type, database.url(), database.session(), ) } pub(crate) fn from_transaction_response( transaction: &Transaction<C>, collection: &Info, ) -> Collection<C> { Self::new( &collection.name, &collection.id, collection.collection_type, transaction.url(), transaction.session(), ) } pub fn collection_type(&self) -> CollectionType { self.collection_type } /// The collection identifier /// /// A collection identifier lets you refer to a collection in a database. It /// is a string value and is unique within the database. Up to including /// ArangoDB 1.1, the collection identifier has been a client’s primary /// means to access collections. Starting with ArangoDB 1.2, clients should /// instead use a collection’s unique name to access a collection instead of /// its identifier. ArangoDB currently uses 64bit unsigned integer values to /// maintain collection ids internally. 
When returning collection ids to /// clients, ArangoDB will put them into a string to ensure the collection /// id is not clipped by clients that do not support big integers. Clients /// should treat the collection ids returned by ArangoDB as opaque strings /// when they store or use them locally. // // Note: collection ids have been returned as integers up to including ArangoDB // 1.1 pub fn id(&self) -> &str { self.id.as_str() } /// The collection name /// /// A collection name identifies a collection in a database. It is a string /// and is unique within the database. Unlike the collection identifier it /// is supplied by the creator of the collection. The collection name must /// consist of letters, digits, and the _ (underscore) and - (dash) /// characters only. Please refer to Naming Conventions in ArangoDB for more /// information on valid collection names. pub fn name(&self) -> &str { self.name.as_str() } /// Collection url: http://server:port/_db/mydb/_api/collection/{collection-name} /// /// This url is used to work on the collection itself pub fn url(&self) -> &Url { &self.base_url } /// Document base url: http://server:port/_db/mydb/_api/document/{collection-name} /// /// This url is used to work with documents pub fn doc_url(&self) -> &Url { &self.document_base_url } /// HTTP Client used to query the server pub fn session(&self) -> Arc<C> { Arc::clone(&self.session) } /// Get the db of current collection pub fn db(&self) -> Database<C> { // Base url should be like `http://server:port/_db/mydb/_api/collection/{collection-name}` let mut paths = self.base_url.path_segments().unwrap(); // must be `_db` paths.next(); // must be db name let name = paths.next().unwrap(); Database::new(name, &self.url().join("/").unwrap(), self.session()) } /// Drop a collection /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn drop(self) -> Result<String, ClientError> { let url = self.base_url.join("").unwrap(); #[derive(Debug, Deserialize)] struct DropCollectionResponse { id: String, } let resp: DropCollectionResponse = deserialize_response(self.session.delete(url, "").await?.body())?; Ok(resp.id) } /// Truncate current collection /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn truncate(&self) -> Result<Info, ClientError> { let url = self.base_url.join("truncate").unwrap(); let resp: Info = deserialize_response(self.session.put(url, "").await?.body())?; Ok(resp) } /// Fetch the properties of collection /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn properties(&self) -> Result<Properties, ClientError> { let url = self.base_url.join("properties").unwrap(); let resp: Properties = deserialize_response(self.session.get(url, "").await?.body())?; Ok(resp) } /// Count the documents in this collection /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn document_count(&self) -> Result<Properties, ClientError> { let url = self.base_url.join("count").unwrap(); let resp: Properties = deserialize_response(self.session.get(url, "").await?.body())?; Ok(resp) } /// Fetch the statistics of a collection /// /// The result also contains the number of documents and additional /// statistical information about the collection. **Note**: This will /// always load the collection into memory. /// /// **Note**: collection data that are stored in the write-ahead log only /// are not reported in the results. When the write-ahead log is /// collected, documents might be added to journals and datafiles of /// the collection, which may modify the figures of the collection. /// /// Additionally, the file sizes of collection and index parameter JSON /// files are not reported. These files should normally have a size of a /// few bytes each. 
Please also note that the fileSize values are reported /// in bytes and reflect the logical file sizes. Some filesystems may use /// optimisations (e.g. sparse files) so that the actual physical file size /// is somewhat different. Directories and sub-directories may also require /// space in the file system, but this space is not reported in the /// fileSize results. /// /// That means that the figures reported do not reflect the actual disk /// usage of the collection with 100% accuracy. The actual disk usage of a /// collection is normally slightly higher than the sum of the reported /// fileSize values. Still the sum of the fileSize values can still be used /// as a lower bound approximation of the disk usage. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn statistics(&self) -> Result<Statistics, ClientError> { let url = self.base_url.join("figures").unwrap(); let resp: Statistics = deserialize_response(self.session.get(url, "").await?.body())?; Ok(resp) } /// Retrieve the collections revision id /// /// The revision id is a server-generated string that clients can use to /// check whether data in a collection has changed since the last revision /// check. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn revision_id(&self) -> Result<Revision, ClientError> { let url = self.base_url.join("revision").unwrap(); let resp: Revision = deserialize_response(self.session.get(url, "").await?.body())?; Ok(resp) } /// Fetch a checksum for the specified collection /// /// Will calculate a checksum of the meta-data (keys and optionally /// revision ids) and optionally the document data in the collection. /// The checksum can be used to compare if two collections on different /// ArangoDB instances contain the same contents. The current revision /// of the collection is returned too so one can make sure the checksums /// are calculated for the same state of data. 
/// /// By default, the checksum will only be calculated on the _key system /// attribute of the documents contained in the collection. For edge /// collections, the system attributes _from and _to will also be included /// in the calculation. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn checksum(&self) -> Result<Checksum, ClientError> { self.checksum_with_options(Default::default()).await } /// Fetch a checksum for the specified collection /// /// Will calculate a checksum of the meta-data (keys and optionally /// revision ids) and optionally the document data in the collection. /// The checksum can be used to compare if two collections on different /// ArangoDB instances contain the same contents. The current revision /// of the collection is returned too so one can make sure the checksums /// are calculated for the same state of data. /// /// By default, the checksum will only be calculated on the _key system /// attribute of the documents contained in the collection. For edge /// collections, the system attributes _from and _to will also be included /// in the calculation. /// /// By setting the optional query parameter withRevisions to true, then /// revision ids (_rev system attributes) are included in the /// checksumming. /// /// By providing the optional query parameter withData with a value of true, /// the user-defined document attributes will be included in the /// calculation too. /// /// Note: Including user-defined attributes will make /// the checksumming slower. this function would make a request to /// arango server. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn checksum_with_options( &self, options: ChecksumOptions, ) -> Result<Checksum, ClientError> { let mut url = self.base_url.join("checksum").unwrap(); let query = serde_qs::to_string(&options).unwrap(); url.set_query(Some(query.as_str())); let resp: Checksum = deserialize_response(self.session.get(url, "").await?.body())?; Ok(resp) } /// Load a collection into memory /// /// Returns the collection on success. /// /// The request body object might optionally contain the following /// attribute: /// /// - count /// /// If set, this controls whether the return value should include /// the number of documents in the collection. Setting count to false may /// speed up loading a collection. The default value for count is true. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn load(&self, count: bool) -> Result<Info, ClientError> { let url = self.base_url.join("load").unwrap(); let body = json!({ "count": count }); let resp: Info = deserialize_response(self.session.put(url, body.to_string()).await?.body())?; Ok(resp) } /// Remove a collection from memory /// /// This call does not delete any documents. You can use the collection /// afterwards; in which case it will be loaded into memory, again. /// /// **Warning**: The unload function is deprecated from version 3.8.0 /// onwards and is a no-op from version 3.9.0 onwards. It should no /// longer be used, as it may be removed in a future version of /// ArangoDB. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn unload(&self) -> Result<Info, ClientError> { let url = self.base_url.join("unload").unwrap(); let resp: Info = deserialize_response(self.session.put(url, "").await?.body())?; Ok(resp) } /// Load Indexes into Memory /// /// This route tries to cache all index entries of this collection into the /// main memory. 
Therefore it iterates over all indexes of the collection /// and stores the indexed values, not the entire document data, in memory. /// All lookups that could be found in the cache are much faster than /// lookups not stored in the cache so you get a nice performance boost. It /// is also guaranteed that the cache is consistent with the stored data. /// /// For the time being this function is only useful on RocksDB storage /// engine, as in MMFiles engine all indexes are in memory anyways. /// /// On RocksDB this function honors all memory limits, if the indexes you /// want to load are smaller than your memory limit this function /// guarantees that most index values are cached. If the index is larger /// than your memory limit this function will fill up values up to this /// limit and for the time being there is no way to control which indexes /// of the collection should have priority over others. /// /// On success this function returns an object with attribute result set to /// true /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn load_indexes(&self) -> Result<bool, ClientError> { let url = self.base_url.join("loadIndexesIntoMemory").unwrap(); let resp: ArangoResult<bool> = deserialize_response(self.session.put(url, "").await?.body())?; Ok(resp.unwrap()) } /// Change the properties of a collection /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn change_properties( &self, properties: PropertiesOptions, ) -> Result<Properties, ClientError> { let url = self.base_url.join("properties").unwrap(); let body = serde_json::to_string(&properties).unwrap(); let resp: Properties = deserialize_response(self.session.put(url, body).await?.body())?; Ok(resp) } /// Rename the collection /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn rename(&mut self, name: &str) -> Result<Info, ClientError> { let url = self.base_url.join("rename").unwrap(); let body = json!({ "name": name }); let resp: Info = deserialize_response(self.session.put(url, body.to_string()).await?.body())?; self.name = name.to_string(); self.base_url = self.base_url.join(&format!("../{}/", name)).unwrap(); Ok(resp) } /// Recalculate the document count of a collection /// /// **Note**: this method is specific for the RocksDB storage engine /// /// # Note /// this function would make a request to arango server. #[cfg(feature = "rocksdb")] #[maybe_async] pub async fn recalculate_count(&self) -> Result<bool, ClientError> { let url = self.base_url.join("recalculateCount").unwrap(); let resp: ArangoResult<bool> = deserialize_response(self.session.put(url, "").await?.body())?; Ok(resp.unwrap()) } /// Rotate the journal of a collection /// /// The current journal of the collection will be closed and made a /// read-only datafile. The purpose of the rotate method is to make the /// data in the file available for compaction (compaction is only performed /// for read-only datafiles, and not for journals). /// /// Saving new data in the collection subsequently will create a new /// journal file automatically if there is no current journal. /// /// This methods is not documented on 3.7 /// /// **Note**: this method is specific for the MMFiles storage engine, and /// there it is not available in a cluster. /// /// # Note /// this function would make a request to arango server. #[cfg(feature = "mmfiles")] #[maybe_async] pub async fn rotate_journal(&self) -> Result<bool, ClientError> { let url = self.base_url.join("rotate").unwrap(); let resp: ArangoResult<bool> = deserialize_response(self.session.put(url, "").await?.body())?; Ok(resp.unwrap()) } /// Create a new document from the document given in the body, unless /// there is already a document with the _key given. 
/// /// If no _key is given, a new unique _key is generated automatically. /// Possibly given _id and _rev attributes in the body are always ignored, /// the URL part or the query parameter collection respectively counts. /// /// If the document was created successfully, then the Location header /// contains the path to the newly created document. /// The Etag header field contains the revision of the document. /// Both are only set in the single document case. /// /// If silent is not set to true, the body of the response contains a JSON /// object with the following attributes: /// /// - _id contains the document identifier of the newly created document /// - _key contains the document key /// - _rev contains the document revision /// /// If the collection parameter waitForSync is false, then the call returns /// as soon as the document has been accepted. It will not wait until /// the documents have been synced to disk. /// /// Optionally, the query parameter waitForSync can be used to force /// synchronization of the document creation operation to disk even in /// case that the waitForSync flag had been disabled for the entire /// collection. Thus, the waitForSync query parameter can be used to /// force synchronization of just this specific operations. To use this, /// set the waitForSync parameter to true. If the waitForSync parameter is /// not specified or set to false, then the collection’s default /// waitForSync behavior is applied. The waitForSync query parameter /// cannot be used to disable synchronization for collections that have a /// default waitForSync value of true. /// /// If the query parameter returnNew is true, then, for each generated /// document, the complete new document is returned under the new attribute /// in the result. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn create_document<T>( &self, doc: T, insert_options: InsertOptions, ) -> Result<DocumentResponse<T>, ClientError> where T: Serialize + DeserializeOwned, { let mut url = self.document_base_url.join("").unwrap(); let body = serde_json::to_string(&doc)?; let query = serde_qs::to_string(&insert_options).unwrap(); url.set_query(Some(query.as_str())); let resp: DocumentResponse<T> = deserialize_response(self.session.post(url, body).await?.body())?; Ok(resp) } /// Read a single document with `_key` /// /// Returns the document identified by document-id. The returned document /// contains three special attributes: _id containing the document /// identifier, _key containing key which uniquely identifies a document in /// a given collection and _rev containing the revision. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn document<T>(&self, _key: &str) -> Result<Document<T>, ClientError> where T: Serialize + DeserializeOwned, { self.document_with_options(_key, Default::default()).await } /// Read a single document with options /// /// Returns the document identified by document-id. The returned document /// contains three special attributes: _id containing the document /// identifier, _key containing key which uniquely identifies a document in /// a given collection and _rev containing the revision. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn document_with_options<T>( &self, _key: &str, read_options: ReadOptions, ) -> Result<Document<T>, ClientError> where T: Serialize + DeserializeOwned, { let url = self.document_base_url.join(_key).unwrap(); let mut build = Request::get(url.to_string()); let header = make_header_from_options(read_options); if let Some(h) = header { build = build.header(h.0, h.1) } let req = build.body("".to_string()).unwrap(); let resp: Document<T> = deserialize_response(self.session.request(req).await?.body())?; Ok(resp) } /// Read a single document header /// /// Like GET, but only returns the header fields and not the body. You can /// use this call to get the current revision of a document or check if the /// document was deleted. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn document_header(&self, _key: &str) -> Result<Header, ClientError> { self.document_header_with_options(_key, Default::default()) .await } /// Read a single document header with options /// /// Like GET, but only returns the header fields and not the body. You can /// use this call to get the current revision of a document or check if the /// document was deleted. /// /// # Note /// this function would make a request to arango server. #[maybe_async] pub async fn document_header_with_options( &self, _key: &str, read_options: ReadOptions, ) -> Result<Header, ClientError> { let url = self.document_base_url.join(_key).unwrap(); let mut build = Request::get(url.to_string()); let header = make_header_from_options(read_options); if let Some(h) = header { build = build.header(h.0, h.1) } let req = build.body("".to_string()).unwrap(); let resp: Header = deserialize_response(self.session.request(req).await?.body())?; Ok(resp) } /// Partially update a document /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn update_document<T>( &self, _key: &str, doc: T, update_options: UpdateOptions, ) -> Result<DocumentResponse<T>, ClientError> where T: Serialize + DeserializeOwned, { let mut url = self.document_base_url.join(_key).unwrap(); let body = serde_json::to_string(&doc)?; let query = serde_qs::to_string(&update_options).unwrap(); url.set_query(Some(query.as_str())); let resp: DocumentResponse<T> = deserialize_response(self.session.patch(url, body).await?.body())?; Ok(resp) } /// Replace a document /// /// Replaces the specified document with the one in the body, provided there /// is such a document and no precondition is violated. /// /// The value of the _key attribute as well as attributes used as sharding /// keys may not be changed. /// /// If the If-Match header is specified and the revision of the document in /// the database is unequal to the given revision, the precondition is /// violated. If If-Match is not given and ignoreRevs is false and there /// is a _rev attribute in the body and its value does not match the /// revision of the document in the database, the precondition is violated. /// If a precondition is violated, an HTTP 412 is returned. /// /// If the document exists and can be updated, then an HTTP 201 or an HTTP /// 202 is returned (depending on waitForSync, see below), the Etag header /// field contains the new revision of the document and the Location header /// contains a complete URL under which the document can be queried. /// /// Cluster only: The replace documents may contain values for the /// collection’s pre-defined shard keys. Values for the shard keys are /// treated as hints to improve performance. Should the shard keys values be /// incorrect ArangoDB may answer with a not found error. Optionally, /// the query parameter waitForSync can be used to force synchronization of /// the document replacement operation to disk even in case that the /// waitForSync flag had been disabled for the entire collection. 
/// Thus, the
/// waitForSync query parameter can be used to force synchronization of just
/// specific operations. To use this, set the waitForSync parameter to
/// true. If the waitForSync parameter is not specified or set to false,
/// then the collection’s default waitForSync behavior is applied. The
/// waitForSync query parameter cannot be used to disable synchronization
/// for collections that have a default waitForSync value of true.
///
/// If silent is not set to true, the body of the response contains a JSON
/// object with the information about the identifier and the revision. The
/// attribute _id contains the known document-id of the updated document,
/// _key contains the key which uniquely identifies a document in a given
/// collection, and the attribute _rev contains the new document revision.
///
/// If the query parameter returnOld is true, then the complete previous
/// revision of the document is returned under the old attribute in the
/// result. If the query parameter returnNew is true, then the complete
/// new document is returned under the new attribute in the result.
///
/// If the document does not exist, then an HTTP 404 is returned and the body
/// of the response contains an error document.
///
/// You can conditionally replace a document based on a target revision id
/// by using the if-match HTTP header.
///
/// # Note
/// this function would make a request to arango server.
#[maybe_async] pub async fn replace_document<T>( &self, _key: &str, doc: T, replace_options: ReplaceOptions, if_match_header: Option<String>, ) -> Result<DocumentResponse<T>, ClientError> where T: Serialize + DeserializeOwned, { let mut url = self.document_base_url.join(_key).unwrap(); let body = serde_json::to_string(&doc)?; let query = serde_qs::to_string(&replace_options).unwrap(); url.set_query(Some(query.as_str())); let mut build = Request::put(url.to_string()); if let Some(if_match_value) = if_match_header { build = build.header("If-Match", if_match_value); } let req = build.body(body).unwrap(); let resp: DocumentResponse<T> = deserialize_response(self.session.request(req).await?.body())?; Ok(resp) } /// Remove a document /// /// If silent is not set to true, the body of the response contains a JSON /// object with the information about the identifier and the revision. The /// attribute _id contains the known document-id of the removed document, /// _key contains the key which uniquely identifies a document in a given /// collection, and the attribute _rev contains the document revision. /// /// If the waitForSync parameter is not specified or set to false, then the /// collection’s default waitForSync behavior is applied. The /// waitForSync query parameter cannot be used to disable /// synchronization for collections that have a default waitForSync value of /// true. /// /// If the query parameter returnOld is true, then the complete previous /// revision of the document is returned under the old attribute in the /// result. /// /// You can conditionally replace a document based on a target revision id /// by using the if-match HTTP header. /// /// # Note /// this function would make a request to arango server. 
#[maybe_async] pub async fn remove_document<T>( &self, _key: &str, remove_options: RemoveOptions, if_match_header: Option<String>, ) -> Result<DocumentResponse<T>, ClientError> where T: Serialize + DeserializeOwned, { let mut url = self.document_base_url.join(_key).unwrap(); let query = serde_qs::to_string(&remove_options).unwrap(); url.set_query(Some(query.as_str())); let mut build = Request::delete(url.to_string()); if let Some(if_match_value) = if_match_header { build = build.header("If-Match", if_match_value); } let req = build.body("".to_string()).unwrap(); let resp: DocumentResponse<T> = deserialize_response(self.session.request(req).await?.body())?; Ok(resp) } /// Returns a new Collection with its `session` updated with the transaction /// id pub fn clone_with_transaction(&self, transaction_id: String) -> Result<Self, ClientError> { let mut session = (*self.session).clone(); session .headers() .insert(TRANSACTION_HEADER, transaction_id.parse().unwrap()); Ok(Self { session: Arc::new(session), ..self.clone() }) } } /// Create header name and header value from read_options fn make_header_from_options( document_read_options: ReadOptions, ) -> Option<(http::header::HeaderName, http::header::HeaderValue)> { match document_read_options { ReadOptions::IfNoneMatch(value) => Some(( "If-None-Match".to_string().parse().unwrap(), http::HeaderValue::try_from(value).unwrap(), )), ReadOptions::IfMatch(value) => Some(( "If-Match".to_string().parse().unwrap(), http::HeaderValue::try_from(value).unwrap(), )), ReadOptions::NoHeader => None, } } #[derive( Debug, Clone, PartialEq, Eq, Copy, serde_repr::Serialize_repr, serde_repr::Deserialize_repr, )] #[repr(u8)] pub enum CollectionType { Document = 2, Edge = 3, }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/client/reqwest.rs
src/client/reqwest.rs
//! Reqwest HTTP client use std::convert::TryInto; #[cfg(any(feature = "reqwest_blocking"))] use ::reqwest::blocking::Client; #[cfg(any(feature = "reqwest_async"))] use ::reqwest::Client; use http::header::HeaderMap; use super::ClientExt; use crate::ClientError; use http::HeaderValue; #[derive(Debug, Clone)] pub struct ReqwestClient { pub client: Client, headers: HeaderMap, } #[maybe_async::maybe_async] impl ClientExt for ReqwestClient { fn new<U: Into<Option<HeaderMap>>>(headers: U) -> Result<Self, ClientError> { let client = Client::builder().gzip(true); let headers = match headers.into() { Some(h) => h, None => HeaderMap::new(), }; client .build() .map(|c| ReqwestClient { client: c, headers }) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } fn headers(&mut self) -> &mut HeaderMap<HeaderValue> { &mut self.headers } async fn request( &self, mut request: http::Request<String>, ) -> Result<http::Response<String>, ClientError> { let headers = request.headers_mut(); for (header, value) in self.headers.iter() { if !headers.contains_key(header) { headers.insert(header, value.clone()); } } let req = request.try_into().unwrap(); let resp = self .client .execute(req) .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let status_code = resp.status(); let headers = resp.headers().clone(); let version = resp.version(); let content = resp .text() .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let mut build = http::Response::builder(); for header in headers.iter() { build = build.header(header.0, header.1); } build .status(status_code) .version(version) .body(content) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/client/mod.rs
src/client/mod.rs
use http::{HeaderMap, Request, Response};
use url::Url;

use crate::ClientError;

#[cfg(any(all(feature = "reqwest_async", feature = "reqwest_blocking"),))]
compile_error!(r#"Enabling both async and blocking version of reqwest client is not allowed."#);

#[cfg(any(feature = "reqwest_async", feature = "reqwest_blocking",))]
pub mod reqwest;
#[cfg(any(feature = "surf_async"))]
pub mod surf;

/// Abstraction over the HTTP client used to talk to the server.
///
/// Every verb helper builds an `http::Request` and funnels it through
/// [`ClientExt::request`], which each concrete backend implements.
#[maybe_async::maybe_async]
pub trait ClientExt: Sync + Clone {
    /// Construct a client, optionally pre-loaded with default headers.
    fn new<U: Into<Option<HeaderMap>>>(headers: U) -> Result<Self, ClientError>;

    /// Mutable access to the default headers sent with every request.
    fn headers(&mut self) -> &mut HeaderMap;

    /// Send a GET request with `text` as the body.
    #[inline]
    async fn get<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::get(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a POST request with `text` as the body.
    #[inline]
    async fn post<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::post(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a PUT request with `text` as the body.
    #[inline]
    async fn put<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::put(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a DELETE request with `text` as the body.
    #[inline]
    async fn delete<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::delete(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a PATCH request with `text` as the body.
    #[inline]
    async fn patch<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::patch(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a CONNECT request with `text` as the body.
    #[inline]
    async fn connect<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::connect(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a HEAD request with `text` as the body.
    #[inline]
    async fn head<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::head(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send an OPTIONS request with `text` as the body.
    #[inline]
    async fn options<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::options(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Send a TRACE request with `text` as the body.
    #[inline]
    async fn trace<T>(&self, url: Url, text: T) -> Result<Response<String>, ClientError>
    where
        T: Into<String> + Send,
    {
        let req = Request::trace(url.to_string()).body(text.into()).unwrap();
        self.request(req).await
    }

    /// Execute a prepared request; implemented by each backend.
    async fn request(&self, request: Request<String>) -> Result<Response<String>, ClientError>;
}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/client/surf.rs
src/client/surf.rs
//! Surf HTTP client use std::str::FromStr; use http::{ header::{HeaderMap, HeaderValue}, Method, StatusCode, Version, }; use super::ClientExt; use crate::ClientError; #[derive(Debug, Clone)] pub struct SurfClient { headers: HeaderMap, } #[async_trait::async_trait] impl ClientExt for SurfClient { fn new<U: Into<Option<HeaderMap>>>(headers: U) -> Result<Self, ClientError> { let headers = match headers.into() { Some(h) => h, None => HeaderMap::new(), }; Ok(SurfClient { headers }) } fn headers(&mut self) -> &mut HeaderMap<HeaderValue> { &mut self.headers } async fn request( &self, request: http::Request<String>, ) -> Result<http::Response<String>, ClientError> { use ::surf::http::headers::HeaderName as SurfHeaderName; let method = request.method().clone(); let url = request.uri().to_owned().to_string(); let text = request.body(); let req = match method { Method::GET => ::surf::get(url), Method::POST => ::surf::post(url), Method::PUT => ::surf::put(url), Method::DELETE => ::surf::delete(url), Method::PATCH => ::surf::patch(url), Method::CONNECT => ::surf::connect(url), Method::HEAD => ::surf::head(url), Method::OPTIONS => ::surf::options(url), Method::TRACE => ::surf::trace(url), m @ _ => return Err(ClientError::HttpClient(format!("invalid method {}", m))), }; let req = self.headers.iter().fold(req, |req, (k, v)| { req.header( SurfHeaderName::from_str(k.as_str()).unwrap(), v.to_str().unwrap(), ) }); let req = request.headers().iter().fold(req, |req, (k, v)| { req.header( SurfHeaderName::from_str(k.as_str()).unwrap(), v.to_str().unwrap(), ) }); let mut resp = req .body(text.to_owned()) .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let status_code = resp.status(); let status = u16::from(status_code); let version = resp.version(); let content = resp .body_string() .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let mut build = http::Response::builder(); for (name, value) in resp.iter() { let mut iter = value.iter(); let acc = 
iter.next().map(|v| v.as_str()).unwrap_or("").to_owned(); let s = iter.fold(acc, |acc, x| format!("{};{}", acc, x.as_str())); build = build.header(name.as_str(), s); } let http_version = version.map(|v| match v { ::surf::http::Version::Http0_9 => Version::HTTP_09, ::surf::http::Version::Http1_0 => Version::HTTP_10, ::surf::http::Version::Http1_1 => Version::HTTP_11, ::surf::http::Version::Http2_0 => Version::HTTP_2, ::surf::http::Version::Http3_0 => Version::HTTP_3, _ => unreachable!(), }); let mut resp = http::response::Builder::from(build).status(StatusCode::from_u16(status).unwrap()); if version.is_some() { resp = resp.version(http_version.unwrap()); } resp.body(content) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/document/response.rs
src/document/response.rs
//! Types of response related to document use serde::{de::Error as DeError, Deserialize, Deserializer}; use super::Header; /// Standard Response when having CRUD operation on document /// /// TODO could add more response variant as shown in official doc /// /// 200: is returned if the document was found /// /// 304: is returned if the “If-None-Match” header is given and the document has /// the same version /// /// 404: is returned if the document or collection was not /// found /// /// 412: is returned if an “If-Match” header is given and the found /// document has a different version. The response will also contain the found /// document’s current revision in the Etag header. pub enum DocumentResponse<T> { /// Silent is when there is empty object returned by the server Silent, /// Contain data after CRUD Response { header: Header, old: Option<T>, new: Option<T>, _old_rev: Option<String>, }, } /// Gives extra method on the DocumentResponse to quickly check what the server /// returns impl<T> DocumentResponse<T> { /// Should be true when the server send back an empty object {} pub fn is_silent(&self) -> bool { matches!(self, DocumentResponse::Silent) } /// Should be true if there is a response from the server pub fn has_response(&self) -> bool { matches!(self, DocumentResponse::Response { .. }) } /// Return the document header contained inside the response pub fn header(&self) -> Option<&Header> { if let DocumentResponse::Response { header, .. } = self { Some(header) } else { None } } /// Return the old document before changes pub fn old_doc(&self) -> Option<&T> { if let DocumentResponse::Response { old, .. } = self { old.as_ref() } else { None } } /// Return the new document pub fn new_doc(&self) -> Option<&T> { if let DocumentResponse::Response { new, .. } = self { new.as_ref() } else { None } } /// return the old revision of the document pub fn old_rev(&self) -> Option<&String> { if let DocumentResponse::Response { _old_rev, .. 
} = self { _old_rev.as_ref() } else { None } } } impl<'de, T> Deserialize<'de> for DocumentResponse<T> where T: Deserialize<'de>, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let mut obj = serde_json::Value::deserialize(deserializer)?; let json = obj .as_object_mut() .ok_or_else(|| DeError::custom("should be a json object"))?; if json.is_empty() { Ok(DocumentResponse::Silent) } else { let _id = json .remove("_id") .ok_or_else(|| DeError::missing_field("_id"))?; let _key = json .remove("_key") .ok_or_else(|| DeError::missing_field("_key"))?; let _rev = json .remove("_rev") .ok_or_else(|| DeError::missing_field("_rev"))?; let header: Header = Header { _id: serde_json::from_value(_id).map_err(DeError::custom)?, _key: serde_json::from_value(_key).map_err(DeError::custom)?, _rev: serde_json::from_value(_rev).map_err(DeError::custom)?, }; let old = json .remove("old") .map(T::deserialize) .transpose() .map_err(DeError::custom)?; let new = json .remove("new") .map(T::deserialize) .transpose() .map_err(DeError::custom)?; let _old_rev = json.remove("_old_rev").map(|v| v.to_string()); Ok(DocumentResponse::Response { header, old, new, _old_rev, }) } } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/document/options.rs
src/document/options.rs
//! Types of options related to document use serde::{Deserialize, Serialize}; use typed_builder::TypedBuilder; /// Options for document insertion. #[derive(Debug, Serialize, Deserialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct InsertOptions { /// Wait until document has been synced to disk. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /// Additionally return the complete new document under the attribute new in /// the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_new: Option<bool>, /// Additionally return the complete old document under the attribute old in /// the result. Only available if the overwrite option is used. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_old: Option<bool>, /// If set to true, an empty object will be returned as response. /// No meta-data will be returned for the created document. /// This option can be used to save some network traffic. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] silent: Option<bool>, /// If set to true, the insert becomes a replace-insert. /// If a document with the same _key already exists the new document is not /// rejected with unique constraint violated but will replace the old /// document. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] overwrite: Option<bool>, /// TODO add nice formatted documentation from official doc #[cfg(feature = "arango3_7")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] overwrite_mode: Option<OverwriteMode>, /// If the intention is to delete existing attributes with the update-insert /// command, the URL query parameter keepNull can be used with a value /// of false. 
This will modify the behavior of the patch command to /// remove any attributes from the existing document that are contained /// in the patch document with an attribute value of null. This option /// controls the update-insert behavior only. #[cfg(feature = "arango3_7")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] keep_null: Option<bool>, /// Controls whether objects (not arrays) will be merged if present in both /// the existing and the update-insert document. /// If set to false, the value in the patch document will overwrite the /// existing document’s value. If set to true, objects will be merged. /// The default is true. This option controls the update-insert behavior /// only. #[cfg(feature = "arango3_7")] #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] merge_objects: Option<bool>, } impl Default for InsertOptions { fn default() -> Self { Self::builder().build() } } /// Options for document update, #[derive(Debug, Serialize, Deserialize, PartialEq, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct UpdateOptions { /// If the intention is to delete existing attributes with the patch /// command, the URL query parameter keepNull can be used with a value of /// false. This will modify the behavior of the patch command to remove any /// attributes from the existing document that are contained in the patch /// document with an attribute value of null. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] keep_null: Option<bool>, /// Controls whether objects (not arrays) will be merged if present in both /// the existing and the patch document. If set to false, the value in the /// patch document will overwrite the existing document’s value. If set to /// true, objects will be merged. The default is true. 
#[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] merge_objects: Option<bool>, /// Wait until document has been synced to disk. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /// By default, or if this is set to true, the _rev attributes in the given /// document is ignored. If this is set to false, then the _rev /// attribute given in the body document is taken as a precondition. The /// document is only update if the current revision is the one specified. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] ignore_revs: Option<bool>, /// Additionally return the complete new document under the attribute new in /// the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_new: Option<bool>, /// Return additionally the complete previous revision of the changed /// document under the attribute old in the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_old: Option<bool>, /// If set to true, an empty object will be returned as response. /// No meta-data will be returned for the updated document. /// This option can be used to save some network traffic. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] silent: Option<bool>, } impl Default for UpdateOptions { fn default() -> Self { Self::builder().build() } } #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] #[serde(rename_all = "camelCase")] pub enum OverwriteMode { /// If a document with the specified _key value exists already, /// nothing will be done and no write operation will be carried out. /// The insert operation will return success in this case. /// This mode does not support returning the old document version using /// RETURN OLD. 
When using RETURN NEW, null will be returned in case the /// document already existed. Ignore, /// If a document with the specified _key value exists already, it will be /// overwritten with the specified document value. This mode will also /// be used when no overwrite mode is specified but the overwrite flag is /// set to true. Replace, /// If a document with the specified _key value exists already, it will be /// patched (partially updated) with the specified document value. /// The overwrite mode can be further controlled via the keepNull and /// mergeObjects parameters Update, /// if a document with the specified _key value exists already, return a /// unique constraint violation error so that the insert operation fails. /// This is also the default behavior in case the overwrite mode is not set, /// and the overwrite flag is false or not set either. /// /// keepNull (optional): If the intention is to delete existing attributes /// with the update-insert command, the URL query parameter keepNull can be /// used with a value of false. This will modify the behavior of the patch /// command to remove any attributes from the existing document that are /// contained in the patch document with an attribute value of null. /// This option controls the update-insert behavior only. /// /// mergeObjects (optional): Controls whether objects (not arrays) will be /// merged if present in both the existing and the update-insert document. /// If set to false, the value in the patch document will overwrite the /// existing document’s value. If set to true, objects will be merged. The /// default is true. This option controls the update-insert behavior only. /// TODO need to implement the two extra modes keepNull & mergeObjects Conflict, } /// Options for document replace, #[derive(Debug, Serialize, Deserialize, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct ReplaceOptions { /// Wait until document has been synced to disk. 
#[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /// By default, or if this is set to true, the _rev attributes in the given /// document is ignored. If this is set to false, then the _rev /// attribute given in the body document is taken as a precondition. The /// document is only replaced if the current revision is the one specified. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] ignore_revs: Option<bool>, /// Additionally return the complete new document under the attribute new in /// the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_new: Option<bool>, /// Additionally return the complete old document under the attribute old in /// the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_old: Option<bool>, /// If set to true, an empty object will be returned as response. /// No meta-data will be returned for the replaced document. /// This option can be used to save some network traffic. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] silent: Option<bool>, } impl Default for ReplaceOptions { fn default() -> Self { Self::builder().build() } } /// Options for document reading. #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub enum ReadOptions { /// If the “If-None-Match” header is given, then it must contain exactly one /// Etag. The document is returned, if it has a different revision than /// the given Etag. Otherwise an HTTP 304 is returned. IfNoneMatch(String), /// If the “If-Match” header is given, then it must contain exactly one /// Etag. The document is returned, if it has the same revision as the /// given Etag. Otherwise a HTTP 412 is returned. 
IfMatch(String), NoHeader, } impl Default for ReadOptions { fn default() -> Self { Self::NoHeader } } /// Options for document removes, #[derive(Debug, Serialize, Deserialize, TypedBuilder, Clone)] #[builder(doc)] #[serde(rename_all = "camelCase")] pub struct RemoveOptions { /// Wait until document has been synced to disk. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] wait_for_sync: Option<bool>, /// Additionally return the complete old document under the attribute old in /// the result. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] return_old: Option<bool>, /// If set to true, an empty object will be returned as response. /// No meta-data will be returned for the created document. /// This option can be used to save some network traffic. #[serde(skip_serializing_if = "Option::is_none")] #[builder(default, setter(strip_option))] silent: Option<bool>, } impl Default for RemoveOptions { fn default() -> Self { Self::builder().build() } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/src/document/mod.rs
src/document/mod.rs
//! Document level types //! //! This mod contains document related types. //! Operations are conducted on collection level struct use serde::{ de::{DeserializeOwned, Error as DeError}, Deserialize, Deserializer, Serialize, }; use std::ops::Deref; pub mod options; pub mod response; #[derive(Serialize, Deserialize, Debug)] pub struct Header { #[serde(skip_serializing_if = "String::is_empty")] pub _id: String, #[serde(skip_serializing_if = "String::is_empty")] pub _key: String, #[serde(skip_serializing_if = "String::is_empty")] pub _rev: String, } /// Structure that represents a document within its content and header #[derive(Serialize, Debug)] pub struct Document<T> { #[serde(flatten)] pub header: Header, #[serde(flatten)] pub document: T, } impl<T> Document<T> where T: Serialize + DeserializeOwned, { pub fn new(data: T) -> Self { Document { document: data, header: Header { _id: String::new(), _key: String::new(), _rev: String::new(), }, } } } impl<T> AsRef<T> for Document<T> { fn as_ref(&self) -> &T { &self.document } } impl<T> Deref for Document<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.document } } impl<'de, T> Deserialize<'de> for Document<T> where T: DeserializeOwned, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let mut obj = serde_json::Value::deserialize(deserializer)?; let json = obj .as_object_mut() .ok_or_else(|| DeError::custom("should be a json object"))?; let _id = json .get("_id") .ok_or_else(|| DeError::missing_field("_id"))?; let _key = json .get("_key") .ok_or_else(|| DeError::missing_field("_key"))?; let _rev = json .get("_rev") .ok_or_else(|| DeError::missing_field("_rev"))?; let header: Header = Header { _id: serde_json::from_value(_id.clone()).map_err(DeError::custom)?, _key: serde_json::from_value(_key.clone()).map_err(DeError::custom)?, _rev: serde_json::from_value(_rev.clone()).map_err(DeError::custom)?, }; let document = 
serde_json::from_value(obj).map_err(DeError::custom)?; Ok(Document { header, document }) } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/user.rs
tests/user.rs
#![allow(unused_imports)] #![allow(unused_parens)] use arangors::client::ClientExt; use log::{info, trace, warn}; use pretty_assertions::assert_eq; use serde_json::Value; use std::collections::HashMap; use crate::common::{get_root_user, root_connection}; use arangors::{ connection::Permission, user::{User, UserAccessLevel}, ArangoError, Connection, }; use common::{ connection, get_arangodb_host, get_normal_password, get_normal_user, test_root_and_normal, test_setup, }; pub mod common; #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_users_non_root() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let users = database.users().await; assert_eq!(users.is_err(), true); // Should return 403 Forbidden } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_users() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); let users = database.users().await; match users { Ok(users) => { assert_eq!(users.len(), 2); } Err(err) => { println!("error: {:?}", err); assert!(false, "Fail to get users: {:?}", err) } } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_user_crud_operations() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); // Create the test user let create_user_res = database .create_user( User::builder() .username("creation_test_user".into()) .password(Some("test_password_123".into())) .active(true) .extra(None) .build(), ) .await; assert_eq!(create_user_res.is_ok(), true); // test if there are 3 users now let users = database.users().await; 
match users { Ok(users) => { assert_eq!(users.len(), 3); assert_eq!( users .iter() .any(|user| user.username == "creation_test_user"), true ); } Err(err) => { println!("error: {:?}", err); assert!(false, "Fail to get users: {:?}", err) } } // Update user let mut extra = HashMap::<String, Value>::new(); extra.insert( "test_key".into(), serde_json::from_str("[ \"test_value\" ]").unwrap(), ); let update_res = database .update_user( "creation_test_user".into(), User::builder() .username("creation_test_user".into()) .password(Some("test_password_1234".into())) .active(true) .extra(Some(extra)) .build(), ) .await; assert_eq!(update_res.is_ok(), true); // test if there are 3 users now let users = database.users().await; match users { Ok(users) => { // Still 3 users assert_eq!(users.len(), 3); // but now with updated username assert_eq!( users .iter() .any(|user| user.username == "creation_test_user"), true ); assert_eq!(users.iter().any(|user| user.extra.is_some()), true); } Err(err) => { println!("error: {:?}", err); assert!(false, "Fail to get users: {:?}", err) } } // Cleanup: delete temporary user let delete_res = database.delete_user("creation_test_user".into()).await; assert_eq!(delete_res.is_ok(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_user_databases() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); // simple response let resp = database.user_databases(get_normal_user(), false).await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); // full response let resp = database.user_databases(get_root_user(), true).await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); // access-level for test_db let resp = database .user_db_access_level(get_root_user(), "test_db".into()) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); } 
#[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_user_db_access_put() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); let resp = database .user_db_access_put(get_normal_user(), "test_db".into(), UserAccessLevel::None) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); let resp = database .user_db_access_put( get_normal_user(), "test_db".into(), UserAccessLevel::ReadWrite, ) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_user_db_collection_access_get() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); let resp = database .user_db_collection_access( get_normal_user(), "test_db".into(), "test_collection".into(), ) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_user_db_collection_access_put() { test_setup(); let conn = root_connection().await; let database = conn.db("test_db").await.unwrap(); let resp = database .user_db_collection_access_put( get_normal_user(), "test_db".into(), "test_collection".into(), UserAccessLevel::ReadOnly, ) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); let resp = database .user_db_collection_access_put( get_normal_user(), "test_db".into(), "test_collection".into(), UserAccessLevel::ReadWrite, ) .await; trace!("resp: {:?}", resp); assert_eq!(resp.is_ok(), true); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/index.rs
tests/index.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use pretty_assertions::assert_eq; use serde_json::{json, Value}; use crate::common::{collection, connection}; use arangors::{ collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, index::{Index, IndexSettings}, ClientError, Connection, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; pub mod common; #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_persistent_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_persistent_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Persistent { unique: true, sparse: false, deduplicate: false, }) .build(); let index = database .create_index(collection_name, &index) .await .unwrap(); let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Persistent { unique, sparse, deduplicate, } = index.settings { assert_eq!(unique, true); assert_eq!(sparse, false); assert_eq!(deduplicate, false); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_hash_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_hash_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Hash { unique: false, sparse: true, deduplicate: true, }) .build(); let 
index = database .create_index(collection_name, &index) .await .unwrap(); let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Hash { unique, sparse, deduplicate, } = index.settings { assert_eq!(unique, false); assert_eq!(sparse, true); assert_eq!(deduplicate, true); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_skiplist_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_skiplist_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Skiplist { unique: true, sparse: false, deduplicate: false, }) .build(); let index = database .create_index(collection_name, &index) .await .unwrap(); let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Skiplist { unique, sparse, deduplicate, } = index.settings { assert_eq!(unique, true); assert_eq!(sparse, false); assert_eq!(deduplicate, false); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_geo_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_geo_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Geo { geo_json: false }) .build(); let index = database .create_index(collection_name, &index) .await .unwrap(); 
let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Geo { geo_json } = index.settings { assert_eq!(geo_json, false); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_ttl_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_ttl_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Ttl { expire_after: 500 }) .build(); let index = database .create_index(collection_name, &index) .await .unwrap(); let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Ttl { expire_after } = index.settings { assert_eq!(expire_after, 500); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_fulltext_index() { test_setup(); let collection_name = "test_collection"; let index_name = "idx_full_test"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let index = Index::builder() .name(index_name) .fields(vec!["password".to_string()]) .settings(IndexSettings::Fulltext { min_length: 100 }) .build(); let index = database .create_index(collection_name, &index) .await .unwrap(); let delete_result = database.delete_index(&index.id).await.unwrap(); assert!(index.id.len() > 0); assert_eq!(index.name, index_name.to_string()); assert_eq!(delete_result.id, index.id); if let IndexSettings::Fulltext { min_length } = index.settings { 
assert_eq!(min_length, 100); } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_list_indexes() { test_setup(); let collection_name = "test_collection"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let list = database.indexes(collection_name).await.unwrap(); assert!(list.indexes.len() > 0); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/analyzer.rs
tests/analyzer.rs
#![allow(unused_imports)] #![allow(unused_parens)] use crate::common::{collection, connection}; use log::{info, trace}; use maybe_async::maybe_async; use pretty_assertions::assert_eq; use std::collections::HashMap; use arangors::{ analyzer::{ AnalyzerCase, AnalyzerFeature, AnalyzerInfo, GeoJsonAnalyzerProperties, GeoJsonType, NgramAnalyzerProperties, NgramStreamType, NormAnalyzerProperties, PipelineAnalyzerProperties, PipelineAnalyzers, }, client::ClientExt, collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, view::View, ClientError, Connection, Database, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; pub mod common; #[maybe_async] async fn create_norm_analyzer<C: ClientExt>( database: &Database<C>, analyzer_name: String, ) -> Result<AnalyzerInfo, ClientError> { let info = AnalyzerInfo::Norm { name: analyzer_name, features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( NormAnalyzerProperties::builder() .locale("en.utf-8".to_string()) .case(AnalyzerCase::Lower) .build(), ), }; database.create_analyzer(info).await } #[maybe_async] async fn create_ngram_analyzer<C: ClientExt>( database: &Database<C>, analyzer_name: String, ) -> Result<AnalyzerInfo, ClientError> { let info = AnalyzerInfo::Ngram { name: analyzer_name, features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( NgramAnalyzerProperties::builder() .min(2) .max(2) .preserve_original(false) .stream_type(NgramStreamType::Utf8) .build(), ), }; database.create_analyzer(info).await } #[maybe_async] async fn create_geo_analyzer<C: ClientExt>( database: &Database<C>, analyzer_name: String, ) -> Result<AnalyzerInfo, ClientError> { let info = AnalyzerInfo::Geojson { name: analyzer_name, features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( GeoJsonAnalyzerProperties::builder() .r#type(GeoJsonType::Centroid) .build(), ), }; 
database.create_analyzer(info).await } #[maybe_async] async fn create_pipeline_analyzer<C: ClientExt>( database: &Database<C>, analyzer_name: String, ) -> Result<AnalyzerInfo, ClientError> { let norm_analyzer = PipelineAnalyzers::Norm { features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( NormAnalyzerProperties::builder() .locale("en.utf-8".to_string()) .case(AnalyzerCase::Lower) .build(), ), }; let ngram_analyzer = PipelineAnalyzers::Ngram { features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( NgramAnalyzerProperties::builder() .min(2) .max(2) .preserve_original(false) .stream_type(NgramStreamType::Utf8) .build(), ), }; let pipe = AnalyzerInfo::Pipeline { name: analyzer_name, properties: PipelineAnalyzerProperties::builder() .pipeline(vec![norm_analyzer, ngram_analyzer]) .build(), }; database.create_analyzer(pipe).await } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_norm_analyzer() { test_setup(); let analyzer_name = "test_analyzer_norm_create".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_norm_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); assert_eq!(analyzer.is_err(), false); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_ngram_analyzer() { test_setup(); let analyzer_name = "test_analyzer_ngram_create".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_ngram_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); 
assert_eq!(analyzer.is_err(), false); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_geo_analyzer() { test_setup(); let analyzer_name = "test_analyzer_geo_create".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_geo_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); assert_eq!(analyzer.is_err(), false); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_pipeline_analyzer() { test_setup(); let analyzer_name = "test_analyzer_pipeline_create".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_pipeline_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); assert_eq!(analyzer.is_err(), false); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_list_analyzer() { test_setup(); let analyzer_name = "test_analyzer_list".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_norm_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); assert_eq!(analyzer.is_err(), false); let analyzer = analyzer.unwrap(); let analyzers = database.list_analyzers().await; let views_analyzers = analyzers.unwrap(); let analyzer_found = 
views_analyzers.iter().find(|a| **a == analyzer); assert_eq!(analyzer_found.is_some(), true); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_exists() { test_setup(); let analyzer_name = "test_analyzer_exists".to_string(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let analyzer = create_norm_analyzer(&database, analyzer_name.clone()).await; trace!("{:?}", analyzer); assert_eq!(analyzer.is_err(), false); let queried_analyzer = database.analyzer(&analyzer_name).await; assert_eq!(queried_analyzer.is_err(), false); assert_eq!(analyzer.unwrap(), queried_analyzer.unwrap()); let result = database.drop_analyzer(&analyzer_name).await; assert_eq!(result.is_err(), false); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/view.rs
tests/view.rs
#![allow(unused_imports)] #![allow(unused_parens)] use std::collections::HashMap; use crate::common::{collection, connection}; use log::{info, trace}; use maybe_async::maybe_async; use pretty_assertions::assert_eq; use arangors::{ client::ClientExt, collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, view::{ArangoSearchViewLink, ArangoSearchViewPropertiesOptions, View, ViewOptions}, ClientError, Connection, Database, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; pub mod common; #[maybe_async] async fn create_view<C: ClientExt>( database: &Database<C>, view_name: String, collection_name: String, ) -> Result<View, ClientError> { let mut links: HashMap<String, ArangoSearchViewLink> = HashMap::new(); links.insert( collection_name, ArangoSearchViewLink::builder() .include_all_fields(true) .build(), ); database .create_view( ViewOptions::builder() .name(view_name) .properties( ArangoSearchViewPropertiesOptions::builder() .links(links) .build(), ) .build(), ) .await } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_view() { test_setup(); let collection_name = "test_collection".to_string(); let view_name = format!("{}_view_create", collection_name); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let view = create_view(&database, view_name, collection_name.clone()).await; trace!("{:?}", view); assert_eq!(view.is_err(), false); let result = database .drop_view(&format!("{}_view_create", collection_name)) .await; assert_eq!(result.is_err(), false); } // #[maybe_async::test( // any(feature = "reqwest_blocking"), // async(any(feature = "reqwest_async"), tokio::test), // async(any(feature = "surf_async"), async_std::test) // )] // async fn test_list_view() { // test_setup(); // let collection_name = 
"test_collection".to_string(); // let view_name = format!("{}_view_list", collection_name); // let conn = connection().await; // let database = conn.db("test_db").await.unwrap(); // let view = create_view(&database, view_name.clone(), // collection_name.clone()).await; // trace!("{:?}", view); // assert_eq!(view.is_err(), false); // let views = database.list_views().await; // trace!("{:?}", views); // assert_eq!(views.is_err(), false); // let views_list = views.unwrap(); // let view_found = views_list.iter().find(|vd| vd.name == // view_name.clone()); // assert_eq!(view_found.is_some(), true); // let result = database // .drop_view(&format!("{}_view_list", collection_name)) // .await; // assert_eq!(result.is_err(), false); // } // #[maybe_async::test( // any(feature = "reqwest_blocking"), // async(any(feature = "reqwest_async"), tokio::test), // async(any(feature = "surf_async"), async_std::test) // )] // async fn update_properties() { // test_setup(); // let collection_name = "test_collection".to_string(); // let view_name = format!("{}_view_update", collection_name); // let conn = connection().await; // let database = conn.db("test_db").await.unwrap(); // let view = create_view(&database, view_name.clone(), // collection_name.clone()).await; // trace!("{:?}", view); // assert_eq!(view.is_err(), false); // let updated_view = database // .update_view_properties( // &view_name, // ArangoSearchViewPropertiesOptions::builder() // .cleanup_interval_step(3) // .build(), // ) // .await; // trace!("{:?}", updated_view); // assert_eq!(updated_view.is_err(), false); // let result = database // .drop_view(&format!("{}_view_update", collection_name)) // .await; // assert_eq!(result.is_err(), false); // }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/document.rs
tests/document.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use pretty_assertions::assert_eq; use serde_json::{json, Value}; use arangors::{ document::{ options::{ InsertOptions, OverwriteMode, ReadOptions, RemoveOptions, ReplaceOptions, UpdateOptions, }, response::DocumentResponse, }, ClientError, Connection, Document, }; use common::{ collection, connection, get_arangodb_host, get_normal_password, get_normal_user, test_setup, }; use std::{convert::TryInto, ptr::null}; pub mod common; #[cfg(not(feature = "arango3_7"))] #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_post_create_document() { test_setup(); let collection_name = "test_collection_create_document"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"Trying to make unit test for createDocument but there are many cases to handle" })); // First test is to create a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); assert_eq!(result.is_silent(), false); assert_eq!(result.has_response(), true); let header = result.header().unwrap(); assert_eq!( header._id.is_empty(), false, "We should get the id of the document" ); assert_eq!( header._rev.is_empty(), false, "We should get the revision of the document" ); assert_eq!( header._key.is_empty(), false, "We should get the key of the document" ); // Second test is to create a simple document with option to get the new // document back let test_doc: Document<Value> = Document::new(json!({ "no":2 , "testDescription":"Test with new" })); let create = coll .create_document(test_doc, InsertOptions::builder().return_new(true).build()) .await; assert_eq!(create.is_ok(), true, "succeed 
create a document"); let result = create.unwrap(); assert_eq!( result.new_doc().is_some(), true, "We should get the new document under the 'new' property" ); let doc = result.new_doc().unwrap(); assert_eq!(doc.document["testDescription"], "Test with new"); let header = result.header().unwrap(); assert_eq!(header._id.is_empty(), false); assert_eq!(header._rev.is_empty(), false); assert_eq!(header._key.is_empty(), false); let key = &header._key; // Third test is to update a simple document with option return old // Should not return anything according to doc if overWriteMode is not used for // now TODO update this test with overwriteMode later let test_doc: Document<Value> = Document::new(json!({ "no":2 , "_key" : key, "testDescription":"Test with old" })); let update = coll .create_document( test_doc, InsertOptions::builder() .return_old(true) .overwrite(true) .build(), ) .await; assert_eq!(update.is_ok(), true, "succeed update a document"); let result = update.unwrap(); assert_eq!(result.old_doc().is_some(), true); let old_doc = result.old_doc().unwrap(); assert_eq!( old_doc.document["testDescription"], "Test with new", "We should get the old document under the 'old' property" ); let header = result.header().unwrap(); assert_eq!(header._id.is_empty(), false,); assert_eq!(header._rev.is_empty(), false,); assert_eq!(header._key.is_empty(), false,); // Fourth testis about the silent option let test_doc: Document<Value> = Document::new(json!({ "no":2 , "testDescription":"Test with silent" })); let create = coll .create_document(test_doc, InsertOptions::builder().silent(true).build()) .await; assert_eq!(create.is_ok(), true, "succeed create a document silently"); let result = create.unwrap(); assert_eq!(result.is_silent(), true); coll.drop().await.expect("Should drop the collection"); } /// TODO need to use CI to validate this test #[cfg(any(feature = "arango3_7"))] #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), 
tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_post_create_document_3_7() { test_setup(); let collection_name = "test_collection_create_document_3_7"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"Trying to make unit test for createDocument but there are many cases to handle" })); // First test is to create a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); assert_eq!( header._id.is_empty(), false, "We should get the id of the document" ); assert_eq!( header._rev.is_empty(), false, "We should get the revision of the document" ); assert_eq!( header._key.is_empty(), false, "We should get the key of the document" ); // Second test is to create a simple document with option to get the new // document back let test_doc: Document<Value> = Document::new(json!({ "no":2 , "testDescription":"Test with new" })); let create = coll .create_document(test_doc, InsertOptions::builder().return_new(true).build()) .await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); assert_eq!( result.return_new().is_some(), true, "we should get the new document under 'new' property" ); let doc = result.new_doc().unwrap(); assert_eq!(doc.document["testDescription"], "Test with new"); let header = result.header().unwrap(); assert_eq!(header._id.is_empty(), false); assert_eq!(header._rev.is_empty(), false); assert_eq!(header._key.is_empty(), false); let key = header._key; // Third test is to update a simple document with option return old // Should not return anything according to doc if overWriteMode is not used for // now TODO update this test with overwriteMode later let test_doc: Document<Value> = 
Document::new(json!({ "no":2 , "_key" : key, "testDescription":"Test with old" })); let update = coll .create_document( test_doc, InsertOptions::builder() .return_old(true) .overwrite(true) .build(), ) .await; let result = update.unwrap(); assert_eq!(result.old_doc().is_some(), true); let old_doc = result.old_doc().unwrap(); assert_eq!( old_doc.document["testDescription"], "Test with new", "We should get the old document under the 'old' property" ); let header = result.header().unwrap(); assert_eq!(header._id.is_empty(), false); assert_eq!(header._rev.is_empty(), false); assert_eq!(header._key.is_empty(), false); // Fourth testis about the silent option let test_doc: Document<Value> = Document::new(json!({ "no":2 , "testDescription":"Test with silent" })); let create = coll .create_document(test_doc, InsertOptions::builder().silent(true).build()) .await; let result = create.unwrap(); assert_eq!( result.is_silent(), true, "silent mode should not return old document" ); // Fifth test is about the overwrite _mode option ignore let test_doc: Document<Value> = Document::new(json!({ "no":2 , "_key" : key, "testDescription":"Test with overwrite mode" })); let update = coll .create_document( test_doc, InsertOptions::builder() .return_new(true) .overwrite_mode(OverwriteMode::Ignore), ) .await; let result = update.unwrap(); assert_eq!(result.new_doc().is_none(), true); assert_eq!(result.old_doc().is_none(), true); assert_eq!(result.header().is_none(), true); // Sixth test is about the overwrite _mode option replace let test_doc: Document<Value> = Document::new(json!({ "no":3 , "_key" : key, "testDescription":"Test with overwrite mode" })); let update = coll .create_document( test_doc, InsertOptions::builder().overwrite_mode(OverwriteMode::Replace), ) .await; let result = update.unwrap(); assert_eq!(result.old_doc().is_none(), true); assert_eq!( result.new_doc().is_none(), false, "we should get the new document when we use the overwriteMode = 'replace'" ); let doc = 
result.new_doc().unwrap(); assert_eq!(doc.document["no"], 3); assert_eq!(result.header().is_none(), false); // Seventh test is about the overwrite _mode option update let test_doc: Document<Value> = Document::new(json!({ "no":4 , "_key" : key, })); let update = coll .create_document( test_doc, InsertOptions::builder().overwrite_mode(OverwriteMode::Update), ) .await; let result = update.unwrap(); assert_eq!(result.old_doc().is_none(), true); assert_eq!(result.new_doc().is_none(), false); let doc = result.new_doc().unwrap(); assert_eq!(doc.document["no"], 4); assert_eq!(result.header().is_none(), false); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_read_document() { test_setup(); let collection_name = "test_collection_read_document"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"read a document" })); // First test is to read a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let read = coll.document(_key.as_str()).await; let result: Document<Value> = read.unwrap(); assert_eq!(result.document["no"], 1); assert_eq!(result.document["testDescription"], "read a document"); // Test if we get the right doc when it does match let read: Result<Document<Value>, ClientError> = coll .document_with_options(_key.as_str(), ReadOptions::IfMatch(_rev.clone())) .await; assert_eq!(read.is_err(), false, "got the right document"); // Test if we get the 412 code response when there is no match let read: Result<Document<Value>, ClientError> = coll 
.document_with_options(_key.as_str(), ReadOptions::IfMatch("_dsdsds_d".to_string())) .await; // We should get a 412, for now for some reason the error is parsed as a // document todo fix how the reponse/error is built assert_eq!( read.is_err(), true, "we should get 412, got: {:?}", read.unwrap().document ); // todo need to test with with IfNoneMatch and 304 coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_read_document_header() { test_setup(); let collection_name = "test_collection_read_document_header"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"read a document" })); // First test is to read a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let read = coll.document_header(_key.as_str()).await; assert_eq!( read.is_ok(), true, "We should get 200, got {:?}", read.err().unwrap() ); let result = read.unwrap(); assert_eq!( result._key, _key.to_string(), "We should got the key of the document : {:?}", result._key ); let read = coll .document_header_with_options(_key.as_str(), ReadOptions::IfMatch(_rev.clone())) .await; assert_eq!(read.is_ok(), true, "We should have the right header"); let result = read.unwrap(); assert_eq!( result._key, _key.to_string(), "We should have the right key, instead got {:?}", result._key ); let read = coll .document_header_with_options(_key.as_str(), ReadOptions::IfMatch("_dsdsds".to_string())) .await; assert_eq!( read.is_err(), true, "We should have an error and the right doc returned" ); let read = 
coll .document_header_with_options(_key.as_str(), ReadOptions::IfNoneMatch(_rev.clone())) .await; assert_eq!( read.is_err(), true, "the If-None-Match header is given and the document has the same version" ); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_patch_update_document() { test_setup(); let collection_name = "test_collection_update_document"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"update document" })); // First test is to update a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let update = coll .update_document( _key.as_str(), json!({ "no":2}), UpdateOptions::builder() .return_new(true) .return_old(true) .build(), ) .await; let result = update.unwrap(); let new_doc = result.new_doc().unwrap(); let old_doc = result.old_doc().unwrap(); assert_eq!(new_doc["no"], 2); assert_eq!(new_doc["testDescription"], "update document"); assert_eq!(old_doc["no"], 1); assert_eq!(old_doc["testDescription"], "update document"); let header = result.header().unwrap(); let _rev = &header._rev; let update = coll .update_document(_key.as_str(), json!({ "no":3}), Default::default()) .await; let result = update.unwrap(); assert_eq!( result.header().unwrap()._rev != _rev.to_string(), true, "We should get a different revision after update" ); // Test when we do not ignore_revs. 
W let replace = coll .update_document( _key.as_str(), json!({ "no":2 , "_rev" :"_dsds_dsds_dsds_" }), UpdateOptions::builder().ignore_revs(false).build(), ) .await; assert_eq!( replace.is_err(), true, "We should have precondition failed as we ask to replace the doc only if for the \ specified _rev in body" ); coll.drop().await.expect("Should drop the collection"); // todo do more test for merge objects and stuff } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_post_replace_document() { test_setup(); let collection_name = "test_collection_replace_document"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"update document" })); // First test is to replace simple document with new & old options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let replace = coll .replace_document( _key.as_str(), json!({ "no":2}), ReplaceOptions::builder() .return_new(true) .return_old(true) .build(), None, ) .await; let result = replace.unwrap(); let new_doc = result.new_doc().unwrap(); assert_eq!(new_doc["no"], 2, "We should get the property updated"); assert_eq!( new_doc["testDescription"].as_str().is_some(), false, "We should get the property removed since we did replace the original object with an \ object that do not have it" ); let old_doc = result.old_doc().unwrap(); assert_eq!( old_doc["no"], 1, "We should get the old property no with its old value" ); assert_eq!( old_doc["testDescription"], "update document", "We should get the old property testDescription with its old value" ); // Second test to try out the silence mode let replace = 
coll .replace_document( _key.as_str(), json!({ "no":2}), ReplaceOptions::builder().silent(true).build(), None, ) .await; let result = replace.unwrap(); assert_eq!(result.is_silent(), true, "We should not get any response"); // third test tro try out the if-match header let replace = coll .replace_document( _key.as_str(), json!({ "no":2}), Default::default(), Some(_rev.clone()), ) .await; assert_eq!( replace.is_err(), true, "We should have precondition failed as we ask to replace the doc only if match the \ specified _rev in header" ); let replace = coll .replace_document( _key.as_str(), json!({ "no":2 , "_rev" :_rev.clone() }), ReplaceOptions::builder().ignore_revs(false).build(), None, ) .await; assert_eq!( replace.is_err(), true, "We should have precondition failed as we ask to replace the doc only if match the \ specified _rev in body" ); coll.drop().await.expect("Should drop the collection"); // todo do more test } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_delete_remove_document() { test_setup(); let collection_name = "test_collection_remove_document"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"update document" })); // First test is to remove a simple document with old options let create: Result<DocumentResponse<Document<Value>>, ClientError> = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed create a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let remove: Result<DocumentResponse<Value>, ClientError> = coll .remove_document( _key.as_str(), RemoveOptions::builder().return_old(true).build(), None, ) .await; let result = remove.unwrap(); assert_eq!( 
result.new_doc().is_none(), true, "we should never have new doc returned when using remove document" ); let old_doc = result.old_doc().unwrap(); assert_eq!( old_doc["no"], 1, "We should get the old property no with its old value" ); assert_eq!( old_doc["testDescription"], "update document", "We should get the old property testDescription with its old value" ); // Second test to try out the silence mode let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"update document" })); let create = coll.create_document(test_doc, Default::default()).await; let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let remove: Result<DocumentResponse<Value>, ClientError> = coll .remove_document( _key.as_str(), RemoveOptions::builder().silent(true).build(), None, ) .await; let result = remove.unwrap(); assert_eq!(result.is_silent(), true, "We should not get any response"); // third test to try out the If-Match header let test_doc: Document<Value> = Document::new(json!({ "no":1 , "testDescription":"update document" })); let create = coll.create_document(test_doc, Default::default()).await; let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let remove: Result<DocumentResponse<Value>, ClientError> = coll .remove_document( _key.as_str(), Default::default(), Some("_rere_dsds_DSds".to_string()), ) .await; assert_eq!( remove.is_err(), true, "We should have precondition failed as we ask to move the doc only if for the specified \ _rev in header" ); // Fourth test to check that we get error if we tried to remove a doc that has // already been removed or that does not exist let remove: Result<DocumentResponse<Value>, ClientError> = coll .remove_document(_key.as_str(), Default::default(), None) .await; assert_eq!(remove.is_err(), false, "We should remove the doc"); let remove: Result<DocumentResponse<Value>, ClientError> = coll 
.remove_document(_key.as_str(), Default::default(), None) .await; assert_eq!( remove.is_err(), true, "We should get 404 because we just have removed the doc before" ); coll.drop().await.expect("Should drop the collection"); // todo do more test } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_document_deserialization() { use serde::{Deserialize, Serialize}; #[derive(Debug, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ItemWithHeader { #[serde(rename = "_id")] id: String, #[serde(rename = "_key")] key: String, #[serde(rename = "_rev")] rev: String, no: usize, } #[derive(Debug, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct Item { no: usize, } test_setup(); let collection_name = "test_document_deserialization"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let test_doc: Document<Value> = Document::new(json!({ "no":1 })); // First test is to read a simple document without options let create = coll.create_document(test_doc, Default::default()).await; assert_eq!(create.is_ok(), true, "succeed creating a document"); let result = create.unwrap(); let header = result.header().unwrap(); let _key = &header._key; let _rev = &header._rev; let read = coll.document(_key.as_str()).await; let result: Document<Item> = read.unwrap(); assert_eq!(result.document.no, 1); assert_eq!(result.header._key, header._key); assert_eq!(result.header._rev, header._rev); assert_eq!(result.header._id, header._id); let read = coll.document(_key.as_str()).await; let result: Document<ItemWithHeader> = read.unwrap(); assert_eq!(result.document.no, 1); assert_eq!(result.header._key, header._key); assert_eq!(result.header._rev, header._rev); assert_eq!(result.header._id, header._id); assert_eq!(result.key, header._key); assert_eq!(result.rev, header._rev); assert_eq!(result.id, 
header._id); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/connection.rs
tests/connection.rs
#![allow(unused_imports)] #![allow(unused_parens)] use arangors::client::ClientExt; use pretty_assertions::assert_eq; use arangors::{connection::Permission, Connection}; use common::{ connection, get_arangodb_host, get_normal_password, get_normal_user, test_root_and_normal, test_setup, }; pub mod common; #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_list_databases() { test_setup(); let conn = connection().await; let dbs = conn.accessible_databases().await.unwrap(); assert_eq!(dbs.contains_key("test_db"), true); let db_permission = dbs.get("test_db").unwrap(); match db_permission { Permission::ReadOnly | Permission::NoAccess => { assert!(false, "Invalid permission {:?}", db_permission) } _ => {} }; } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_url() { test_setup(); let host = get_arangodb_host(); let user = get_normal_user(); let password = get_normal_password(); let conn = Connection::establish_jwt(&host, &user, &password) .await .unwrap(); let url: String = conn.url().clone().into(); assert_eq!(url, host) } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_database() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await; assert_eq!(database.is_err(), false); let database = conn.db("test_db_non_exist").await; assert_eq!(database.is_err(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_basic_auth() { test_setup(); let host = get_arangodb_host(); let user = get_normal_user(); let password = 
get_normal_password(); let conn = Connection::establish_jwt(&host, &user, &password) .await .unwrap(); let session = conn.session(); let resp = session.get(host.parse().unwrap(), "").await.unwrap(); let headers = resp.headers(); assert_eq!(headers.get("Server").unwrap(), "ArangoDB"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_jwt() { test_setup(); #[maybe_async::maybe_async] async fn jwt(user: String, passwd: String) { let host = get_arangodb_host(); let conn = Connection::establish_jwt(&host, &user, &passwd) .await .unwrap(); let session = conn.session(); let resp = session.get(host.parse().unwrap(), "").await.unwrap(); let headers = resp.headers(); assert_eq!(headers.get("Server").unwrap(), "ArangoDB"); } test_root_and_normal(jwt).await; }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/database.rs
tests/database.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use pretty_assertions::assert_eq; use arangors::Connection; use common::{ connection, get_arangodb_host, get_normal_password, get_normal_user, get_root_password, get_root_user, test_root_and_normal, test_setup, }; pub mod common; const NEW_DB_NAME: &str = "example"; #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_database() { test_setup(); let host = get_arangodb_host(); let root_user = get_root_user(); let root_password = get_root_password(); let conn = Connection::establish_jwt(&host, &root_user, &root_password) .await .unwrap(); let result = conn.create_database(NEW_DB_NAME).await; if let Err(e) = result { assert!(false, "Fail to create database: {:?}", e) }; let result = conn.db(NEW_DB_NAME).await; assert_eq!(result.is_err(), false); let result = conn.drop_database(NEW_DB_NAME).await; if let Err(e) = result { assert!(false, "Fail to drop database: {:?}", e) }; let result = conn.db(NEW_DB_NAME).await; assert_eq!(result.is_err(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_fetch_current_database_info() { test_setup(); #[maybe_async::maybe_async] async fn fetch_current_database(user: String, passwd: String) { let host = get_arangodb_host(); let conn = Connection::establish_jwt(&host, &user, &passwd) .await .unwrap(); let db = conn.db("test_db").await.unwrap(); let info = db.info().await; match info { Ok(info) => { trace!("{:?}", info); assert_eq!(info.is_system, false) } Err(e) => assert!(false, "Fail to fetch database: {:?}", e), } } test_root_and_normal(fetch_current_database).await; } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = 
"surf_async"), async_std::test) )] async fn test_get_version() { test_setup(); let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let version = db.arango_version().await.unwrap(); trace!("{:?}", version); assert_eq!(version.license, "community"); assert_eq!(version.server, "arango"); let re = regex::Regex::new(r"3\.\d+\.\d+").unwrap(); assert_eq!( re.is_match(&version.version), true, "version: {}", version.version ); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/aql.rs
tests/aql.rs
#![allow(unused_imports)] #![allow(unused_parens)] use pretty_assertions::assert_eq; use serde::{Deserialize, Serialize}; use arangors::{AqlQuery, Connection, Document}; use common::{connection, test_setup}; use crate::common::{get_arangodb_host, get_root_password, get_root_user}; pub mod common; #[derive(Serialize, Deserialize, Debug)] struct User { pub username: String, pub password: String, } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_aql_str() { test_setup(); let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let result: Vec<Document<User>> = db .aql_str(r#"FOR i in test_collection FILTER i.username=="test2" return i"#) .await .unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].document.password, "test2_pwd"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_aql() { test_setup(); let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let aql = AqlQuery::builder() .query(r#"FOR i in test_collection FILTER i.username=="test2" return i"#) .build(); let result: Vec<Document<User>> = db.aql_query(aql).await.unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].document.password, "test2_pwd"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_aql_bind_vars() { test_setup(); let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let aql = AqlQuery::builder() .query(r#"FOR i in test_collection FILTER i.username==@username AND i.password==@password return i"#) .bind_var("username", "test2") .bind_var("password", "test2_pwd") .build(); let result: Vec<Document<User>> = db.aql_query(aql).await.unwrap(); 
assert_eq!(result.len(), 1); assert_eq!(result[0].document.password, "test2_pwd"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_aql_try_bind() { test_setup(); let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let user = User { username: "test2".to_owned(), password: "test2_pwd".to_owned(), }; let aql = AqlQuery::builder() .query(r#"FOR i in test_collection FILTER i.username==@user.username AND i.password==@user.password return i"#) .try_bind("user", user) .unwrap() .build(); let result: Vec<Document<User>> = db.aql_query(aql).await.unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].document.password, "test2_pwd"); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/common.rs
tests/common.rs
#![allow(unused_imports)] #![allow(unused_parens)] use arangors::{connection::Connection, Collection, Database}; use std::{env, future::Future}; pub const ARANGODB_HOST: &str = "http://localhost:8529/"; pub const ROOT_USERNAME: &str = "root"; pub const ROOT_PASSWORD: &str = "KWNngteTps7XjrNv"; pub const NORMAL_USERNAME: &str = "username"; pub const NORMAL_PASSWORD: &str = "password"; pub fn get_root_user() -> String { env::var("ARANGO_ROOT_USER").unwrap_or(ROOT_USERNAME.to_owned()) } pub fn get_root_password() -> String { env::var("ARANGO_ROOT_PASSWORD").unwrap_or(ROOT_PASSWORD.to_owned()) } pub fn get_normal_user() -> String { env::var("ARANGO_USER").unwrap_or(NORMAL_USERNAME.to_owned()) } pub fn get_normal_password() -> String { env::var("ARANGO_PASSWORD").unwrap_or(NORMAL_PASSWORD.to_owned()) } pub fn get_arangodb_host() -> String { env::var("ARANGODB_HOST") .map(|s| format!("http://{}", s)) .unwrap_or(ARANGODB_HOST.to_owned()) } #[test] pub fn test_setup() { match env_logger::Builder::from_default_env() .is_test(true) .try_init() { _ => {} } } #[maybe_async::maybe_async] pub async fn connection() -> arangors::Connection { let host = get_arangodb_host(); let user = get_normal_user(); let password = get_normal_password(); Connection::establish_jwt(&host, &user, &password) .await .unwrap() } #[maybe_async::maybe_async] pub async fn root_connection() -> arangors::Connection { let host = get_arangodb_host(); let user = get_root_user(); let password = get_root_password(); Connection::establish_jwt(&host, &user, &password) .await .unwrap() } #[cfg(any(feature = "reqwest_async", feature = "reqwest_blocking"))] #[maybe_async::maybe_async] pub async fn collection<'a>( conn: &'a arangors::Connection, name: &str, ) -> Collection<arangors::client::reqwest::ReqwestClient> { let database = conn.db("test_db").await.unwrap(); match database.drop_collection(name).await { _ => {} }; database .create_collection(name) .await .expect("Fail to create the collection"); 
database.collection(name).await.unwrap() } #[cfg(feature = "surf_async")] #[maybe_async::maybe_async] pub async fn collection<'a>( conn: &'a arangors::Connection, name: &str, ) -> Collection<arangors::client::surf::SurfClient> { let database = conn.db("test_db").await.unwrap(); match database.drop_collection(name).await { _ => {} }; database .create_collection(name) .await .expect("Fail to create the collection"); database.collection(name).await.unwrap() } #[maybe_async::sync_impl] pub fn test_root_and_normal<T>(test: T) -> () where T: Fn(String, String) -> (), { test(get_root_user(), get_root_password()); test(get_normal_user(), get_normal_password()); } #[maybe_async::async_impl] pub async fn test_root_and_normal<T, F>(test: T) -> () where T: Fn(String, String) -> F, F: Future<Output = ()>, { test(get_root_user(), get_root_password()).await; test(get_normal_user(), get_normal_password()).await; }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/graph.rs
tests/graph.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use pretty_assertions::assert_eq; use serde_json::{json, Value}; use arangors::{ client::ClientExt, collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, graph::*, ClientError, Connection, Database, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; use crate::common::{collection, connection}; pub mod common; #[maybe_async::maybe_async] async fn drop_all_graphs<C: ClientExt>(db: &Database<C>, names: Vec<&str>) { for name in names.iter() { drop_graph(db, name).await; } } #[maybe_async::maybe_async] async fn drop_graph<C: ClientExt>(db: &Database<C>, name: &str) { match db.drop_graph(name, false).await { Ok(()) => (), Err(err) => println!("Failed to drop graph: {:?}", err), } } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_simple_graph() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); // Cleanup drop_graph(&database, "test_graph").await; let graph = Graph::builder() .name("test_graph".to_string()) .edge_definitions(vec![EdgeDefinition { collection: "some_edge".to_string(), from: vec!["from_collection".to_string()], to: vec!["to_collection".to_string()], }]) .build(); let result = database.create_graph(graph, true).await.unwrap(); assert_eq!(result.name, "test_graph".to_string()); assert!(result.is_disjoint.is_none()); assert!(result.is_smart.is_none()); assert!(result.orphan_collections.is_empty()); assert!(result.options.is_none()); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_complex_graph() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); // Cleanup 
drop_graph(&database, "test_complex_graph").await; let graph = Graph::builder() .name("test_complex_graph".to_string()) .edge_definitions(vec![EdgeDefinition { collection: "some_edge".to_string(), from: vec!["from_collection".to_string()], to: vec!["to_collection".to_string()], }]) .orphan_collections(vec!["some_collection".to_string()]) .is_smart(Some(true)) .is_disjoint(Some(false)) .options(Some(GraphOptions { smart_graph_attribute: Some("region".to_string()), number_of_shards: Some(2), replication_factor: Some(10), write_concern: Some(8), })) .build(); let result = database.create_graph(graph, true).await.unwrap(); assert_eq!(result.name, "test_complex_graph".to_string()); assert_eq!(result.orphan_collections.len(), 1); // Would work only with Enterprise Edition // // assert_eq!(result.is_disjoint.unwrap(), false); // assert_eq!(result.is_smart.unwrap(), true); // assert!(result.options.is_some()); // let options = result.options.unwrap(); // assert_eq!(options.number_of_shards.unwrap(), 2); // assert_eq!(options.replication_factor.unwrap(),10); // assert_eq!(options.write_concern.unwrap(), 8); // assert_eq!(options.smart_graph_attribute.unwrap(), "region".to_string()); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_graph_retrieval() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); // Cleanup drop_all_graphs(&database, vec!["test_graph1", "test_graph2", "test_graph3"]).await; let graph1 = Graph::builder() .name("test_graph1".to_string()) .edge_definitions(vec![EdgeDefinition { collection: "some_edge1".to_string(), from: vec!["from_collection_1".to_string()], to: vec!["to_collection".to_string()], }]) .build(); let graph2 = Graph::builder() .name("test_graph2".to_string()) .edge_definitions(vec![EdgeDefinition { collection: "some_edge2".to_string(), from: 
vec!["from_collection_2".to_string()], to: vec!["to_collection".to_string()], }]) .build(); let graph3 = Graph::builder() .name("test_graph3".to_string()) .edge_definitions(vec![EdgeDefinition { collection: "some_edge3".to_string(), from: vec!["from_collection_3".to_string()], to: vec!["to_collection".to_string()], }]) .build(); database.create_graph(graph1, true).await.unwrap(); database.create_graph(graph2, true).await.unwrap(); database.create_graph(graph3, true).await.unwrap(); let count = database.graphs().await.unwrap(); trace!("received: {:?}", count); assert!(count.graphs.len() >= 3); let result = database.graph("test_graph2").await.unwrap(); assert_eq!(result.name, "test_graph2"); } // This tests the default value of `orphanCollections` which can't be optional // but can be empty #[test] fn minimal_serialization_works() { let json = json!( { "name": "GraphName", "edgeDefinitions": [] } ); let graph: Graph = serde_json::from_value(json).unwrap(); assert!(graph.orphan_collections.is_empty()); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/collection.rs
tests/collection.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use pretty_assertions::assert_eq; use serde_json::{json, Value}; use crate::common::{collection, connection}; use arangors::{ collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, ClientError, Connection, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; pub mod common; #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_collection() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = database.accessible_collections().await; trace!("{:?}", coll); let coll = database.collection("test_collection").await; assert_eq!(coll.is_err(), false); let coll = database.collection("test_collection_non_exists").await; assert_eq!(coll.is_err(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_db_from_collection() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = database.accessible_collections().await; trace!("{:?}", coll); let coll = database.collection("test_collection").await.unwrap(); let db = coll.db(); assert_eq!(db.name(), database.name()); assert_eq!(db.url(), database.url()); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_collection() { test_setup(); let collection_name = "test_collection_create_and_drop"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = database.drop_collection(collection_name).await; assert_eq!( coll.is_err(), true, "The collection 
should have been drop previously" ); let coll = database.create_collection(collection_name).await; assert_eq!(coll.is_err(), false, "Fail to create the collection"); let coll = coll.unwrap(); assert_eq!( coll.collection_type(), CollectionType::Document, "Got Edge collection" ); let res = database.drop_collection(collection_name).await; assert_eq!(res.is_err(), false, "Fail to drop the collection"); let coll = database.create_collection(collection_name).await; assert_eq!(coll.is_err(), false, "Fail to create the collection"); let res = coll.unwrap().drop().await; assert_eq!(res.is_err(), false, "Fail to drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_create_and_drop_edge_collection() { test_setup(); let collection_name = "test_edge_collection_create_and_drop"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = database.drop_collection(collection_name).await; assert_eq!( coll.is_err(), true, "The collection should have been drop previously" ); let coll = database.create_edge_collection(collection_name).await; assert_eq!(coll.is_err(), false, "Fail to create the collection"); let coll = coll.unwrap(); assert_eq!( coll.collection_type(), CollectionType::Edge, "Got Document collection" ); let res = database.drop_collection(collection_name).await; assert_eq!(res.is_err(), false, "Fail to drop the collection"); let coll = database.create_edge_collection(collection_name).await; assert_eq!(coll.is_err(), false, "Fail to create the collection"); let res = coll.unwrap().drop().await; assert_eq!(res.is_err(), false, "Fail to drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_truncate_collection() { test_setup(); let collection_name = 
"test_collection_truncate"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let res = coll.truncate().await; assert_eq!(res.is_ok(), true); let res = res.unwrap(); assert_eq!(res.is_system, false); assert_eq!(res.name, collection_name); assert_eq!(res.collection_type, CollectionType::Document); coll.drop().await.expect("Fail to drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_properties() { test_setup(); let collection_name = "test_collection_properties"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let properties = coll.properties().await; assert_eq!(properties.is_err(), false); let result = properties.unwrap(); assert_eq!(result.info.name, collection_name); #[cfg(feature = "rocksdb")] { assert_eq!(result.detail.cache_enabled, false); } #[cfg(feature = "mmfiles")] { assert_eq!(result.detail.is_volatile, false); assert_eq!(result.detail.do_compact, true); } assert_eq!(result.info.is_system, false); assert_eq!(result.detail.wait_for_sync, false); assert_eq!(result.detail.key_options.allow_user_keys, true); assert_eq!( result.detail.key_options.key_type, Some("traditional".to_string()) ); assert_eq!(result.detail.key_options.last_value, Some(0)); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.detail.write_concern, 1); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_document_count() { test_setup(); let collection_name = "test_collection_count"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = collection(&conn, collection_name).await; let count = coll.document_count().await; let result = 
count.unwrap(); assert_eq!(result.info.count, Some(0)); assert_eq!(result.info.name, collection_name); #[cfg(features = "rocksdb")] assert_eq!(result.detail.cache_enabled, false); assert_eq!(result.info.is_system, false); assert_eq!(result.detail.wait_for_sync, false); assert_eq!(result.detail.key_options.allow_user_keys, true); assert_eq!( result.detail.key_options.key_type, Some("traditional".to_string()) ); assert_eq!(result.detail.key_options.last_value, Some(0)); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.detail.write_concern, 1); database .aql_str::<Value>(r#"INSERT { "name": "test_user" } INTO test_collection_count"#) .await .unwrap(); let updated_count = coll.document_count().await; let updated_result = updated_count.unwrap(); assert_eq!(updated_result.info.count, Some(1)); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_statistics() { test_setup(); let collection_name = "test_collection_statistics"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let statistics = coll.statistics().await; let result = statistics.unwrap(); assert_eq!(result.count, Some(0), "count"); assert_eq!(result.info.name, collection_name); #[cfg(feature = "rocksdb")] assert_eq!(result.detail.cache_enabled, false); assert_eq!(result.info.is_system, false); assert_eq!(result.detail.wait_for_sync, false, "wait for sync"); assert_eq!( result.detail.key_options.allow_user_keys, true, "allow user keys" ); assert_eq!( result.detail.key_options.key_type, Some("traditional".to_string()) ); assert_eq!(result.detail.key_options.last_value, Some(0), "last value"); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.detail.write_concern, 1); assert_eq!(result.figures.indexes.count, Some(1)); #[cfg(not(feature = "mmfiles"))] 
assert_eq!(result.figures.indexes.size, Some(0), "indexes size"); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_revision_id() { test_setup(); let collection_name = "test_collection_revision_id"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let revision = coll.revision_id().await; let result = revision.unwrap(); assert_eq!(result.info.name, collection_name); #[cfg(feature = "rocksdb")] assert_eq!(result.detail.cache_enabled, false); assert_eq!(result.info.is_system, false); assert_eq!(result.detail.wait_for_sync, false); assert_eq!(result.detail.key_options.allow_user_keys, true); assert_eq!( result.detail.key_options.key_type, Some("traditional".to_string()) ); assert_eq!(result.detail.key_options.last_value, Some(0)); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.detail.write_concern, 1); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_get_checksum() { test_setup(); let collection_name = "test_collection_checksum"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = collection(&conn, collection_name).await; let checksum = coll.checksum().await; let result = checksum.unwrap(); assert_eq!(result.revision, "0"); assert_eq!(result.info.name, collection_name); assert_eq!(result.info.is_system, false); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.info.collection_type, CollectionType::Document); assert_eq!(result.checksum, "0"); assert_eq!(result.checksum.is_empty(), false); let options = ChecksumOptions::builder() .with_revision(true) .with_data(true) .build(); let checksum = 
coll.checksum_with_options(options).await; let updated_result = checksum.unwrap(); assert_eq!(updated_result.revision, "0"); assert_eq!(updated_result.info.name, collection_name); assert_eq!(updated_result.info.is_system, false); assert_eq!(updated_result.info.status, Status::Loaded); assert_eq!( updated_result.info.collection_type, CollectionType::Document ); assert_eq!(updated_result.checksum, "0"); assert_eq!(updated_result.checksum.is_empty(), false); database .aql_str::<Value>(r#"INSERT { "name": "test_user" } INTO test_collection_checksum"#) .await .unwrap(); let checksum = coll.checksum().await; let updated_result = checksum.unwrap(); let changed = if updated_result.revision != result.revision { true } else { false }; assert_eq!(changed, true); assert_eq!(updated_result.info.name, collection_name); assert_eq!(updated_result.info.is_system, false); assert_eq!(updated_result.info.status, Status::Loaded); assert_eq!( updated_result.info.collection_type, CollectionType::Document ); assert_eq!(updated_result.checksum.is_empty(), false); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_load() { test_setup(); let collection_name = "test_collection_load"; let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let coll = collection(&conn, collection_name).await; let load = coll.load(true).await; let result = load.unwrap(); assert_eq!(result.name, collection_name); assert_eq!(result.is_system, false); assert_eq!(result.count, Some(0)); assert_eq!(result.status, Status::Loaded); assert_eq!(result.collection_type, CollectionType::Document); let load = coll.load(false).await; let updated_result = load.unwrap(); assert_eq!(updated_result.name, collection_name); assert_eq!(updated_result.is_system, false); assert_eq!(updated_result.count, None); 
assert_eq!(updated_result.status, Status::Loaded); assert_eq!(updated_result.collection_type, CollectionType::Document); database .aql_str::<Value>(r#"INSERT { "name": "test_user" } INTO test_collection_load"#) .await .unwrap(); let load = coll.load(true).await; let updated_result = load.unwrap(); assert_eq!(updated_result.name, collection_name); assert_eq!(updated_result.is_system, false); assert_eq!(updated_result.count, Some(1)); assert_eq!(updated_result.status, Status::Loaded); assert_eq!(updated_result.collection_type, CollectionType::Document); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_unload() { test_setup(); let collection_name = "test_collection_unload"; let conn = connection().await; let db = conn.db("test_db").await.unwrap(); let version = db.arango_version().await.unwrap(); let re = regex::Regex::new(r"3\.(\d+)\.\d+").unwrap(); let coll = collection(&conn, collection_name).await; let unload = coll.unload().await; let result = unload.unwrap(); assert_eq!(result.name, collection_name); assert_eq!(result.is_system, false); assert_eq!(result.count, None); if re.captures(&version.version).unwrap()[1] .parse::<i32>() .unwrap() < 9 { assert!( matches!(result.status, Status::Unloaded | Status::Unloading), "wrong status: {:?}", result.status ); } else { assert!( matches!( result.status, Status::Unloaded | Status::Unloading | Status::Loaded ), "wrong status: {:?}", result.status ); } assert_eq!(result.collection_type, CollectionType::Document); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_load_indexes_into_memory() { test_setup(); let collection_name = 
"test_collection_load_indexes_into_memory"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let load_index = coll.load_indexes().await; let result = load_index.unwrap(); assert_eq!(result, true); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_changes_properties() { test_setup(); let collection_name = "test_collection_changes_properties"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let options = PropertiesOptions::builder().wait_for_sync(true).build(); let updated_properties = coll.change_properties(options).await; let result = updated_properties.unwrap(); assert_eq!(result.info.name, collection_name); #[cfg(feature = "rocksdb")] assert_eq!(result.detail.cache_enabled, false); assert_eq!(result.info.is_system, false); assert_eq!(result.detail.wait_for_sync, true); assert_eq!(result.detail.key_options.allow_user_keys, true); assert_eq!( result.detail.key_options.key_type, Some("traditional".to_string()) ); assert_eq!(result.detail.key_options.last_value, Some(0)); assert_eq!(result.info.status, Status::Loaded); assert_eq!(result.detail.write_concern, 1); coll.drop().await.expect("Should drop the collection"); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_rename() { test_setup(); let collection_name = "test_collection_rename"; let conn = connection().await; let mut coll = collection(&conn, collection_name).await; let new_name = "test_collection_renamed_2"; let renamed = coll.rename(new_name).await; let result = renamed.unwrap(); assert_eq!(coll.name(), new_name); assert_eq!(result.name, new_name); assert_eq!(result.is_system, false); assert_eq!(result.status, 
Status::Loaded); assert_eq!(result.collection_type, CollectionType::Document); coll.drop().await.expect("Should drop the collection"); } #[cfg(feature = "rocksdb")] #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_recalculate() { test_setup(); let collection_name = "test_collection_recalculate"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let recalculate = coll.recalculate_count().await; let result = recalculate.unwrap(); assert_eq!(result, true); coll.drop().await.expect("Should drop the collection"); } #[cfg(any(feature = "mmfiles"))] #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_put_rotate_journal() { test_setup(); let collection_name = "test_collection_rotate_journal"; let conn = connection().await; let coll = collection(&conn, collection_name).await; let _rotate = coll.rotate_journal().await; // TODO got error in arangoDB 3.7.1: expecting one of the actions 'load', // 'unload', 'truncate', 'properties', 'compact', 'rename', // 'loadIndexesIntoMemory' // assert_eq!(rotate.is_err(), true, "succeed rotating journal"); // if let ClientError::Arango(error) = rotate.unwrap_err() { // assert_eq!( // error.code(), // 400, // "Should be no journal, but now it's: {}", // error.message() // ) // } // assert_eq!(rotate.is_ok(), true, "fail to rotate journal: {:?}", rotate); // let result = rotate.unwrap(); // assert_eq!(result, true, "rotate result should be true"); coll.drop().await.expect("Should drop the collection"); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/tests/transaction.rs
tests/transaction.rs
#![allow(unused_imports)] #![allow(unused_parens)] use log::trace; use maybe_async::maybe_async; use pretty_assertions::assert_eq; use serde_json::{json, Value}; use crate::common::{collection, connection}; use arangors::{ client::ClientExt, collection::{ options::{ChecksumOptions, PropertiesOptions}, response::Status, CollectionType, }, document::options::RemoveOptions, transaction::{ Status as TransactionStatus, Transaction, TransactionCollections, TransactionSettings, }, ClientError, Connection, Database, Document, }; use common::{get_arangodb_host, get_normal_password, get_normal_user, test_setup}; pub mod common; #[maybe_async] async fn create_transaction<C: ClientExt>( database: &Database<C>, collection_name: String, ) -> Result<Transaction<C>, ClientError> { database .begin_transaction( TransactionSettings::builder() .lock_timeout(60) .collections( TransactionCollections::builder() .write(vec![collection_name]) .build(), ) .build(), ) .await } #[maybe_async] async fn create_document<C: ClientExt>(tx: &Transaction<C>) -> Result<String, ClientError> { let test_doc: Document<Value> = Document::new(json!({ "user_name":"test21", "user_name":"test21_pwd", })); let collection = tx.collection("test_collection").await?; let document = collection .create_document(test_doc, Default::default()) .await?; let header = document.header().unwrap(); let _key = &header._key; Ok(_key.clone()) } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_start_transaction() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let tx_result = create_transaction(&database, "test_collection".to_string()).await; if tx_result.is_err() { log::error!("Error: {:?}", tx_result.as_ref().err()); } assert_eq!(tx_result.is_err(), false); let tx = tx_result.unwrap(); let status = tx.abort().await.unwrap(); trace!("{:?}", status); 
assert_eq!(status, TransactionStatus::Aborted); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_abort_transaction() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let tx_result = create_transaction(&database, "test_collection".to_string()).await; if tx_result.is_err() { log::error!("Error: {:?}", tx_result.as_ref().err()); } assert_eq!(tx_result.is_err(), false); let tx = tx_result.unwrap(); let key_result = create_document(&tx).await; assert_eq!(key_result.is_err(), false); let status = tx.abort().await.unwrap(); trace!("{:?}", status); assert_eq!(status, TransactionStatus::Aborted); let key = key_result.unwrap(); let collection = database.collection("test_collection").await.unwrap(); let doc = collection.document::<Value>(&key).await; assert_eq!(doc.is_err(), true); } #[maybe_async::test( any(feature = "reqwest_blocking"), async(any(feature = "reqwest_async"), tokio::test), async(any(feature = "surf_async"), async_std::test) )] async fn test_commit_transaction() { test_setup(); let conn = connection().await; let database = conn.db("test_db").await.unwrap(); let tx_result = create_transaction(&database, "test_collection".to_string()).await; if tx_result.is_err() { log::error!("Error: {:?}", tx_result.as_ref().err()); } assert_eq!(tx_result.is_err(), false); let tx = tx_result.unwrap(); let key_result = create_document(&tx).await; assert_eq!(key_result.is_err(), false); let status = tx.commit().await.unwrap(); trace!("{:?}", status); assert_eq!(status, TransactionStatus::Committed); let key = key_result.unwrap(); let collection = database.collection("test_collection").await.unwrap(); let doc = collection.document::<Value>(&key).await; assert_eq!(doc.is_ok(), true); let old_doc = collection .remove_document::<Value>( &key, RemoveOptions::builder().return_old(true).build(), None, ) .await; 
assert_eq!(old_doc.is_ok(), true); }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/analyzer.rs
examples/analyzer.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::{ analyzer::{AnalyzerCase, AnalyzerFeature, AnalyzerInfo, NormAnalyzerProperties}, Connection, }; use std::collections::HashMap; const URL: &str = "http://localhost:8529"; #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let analyzer_name = "test_analyzer".to_string(); let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; let info = AnalyzerInfo::Norm { name: analyzer_name.clone(), features: Some(vec![AnalyzerFeature::Frequency, AnalyzerFeature::Norm]), properties: Some( NormAnalyzerProperties::builder() .locale("en.utf-8".to_string()) .case(AnalyzerCase::Lower) .build(), ), }; database.create_analyzer(info).await?; database.drop_analyzer(&analyzer_name).await?; Ok(()) } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/view.rs
examples/view.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::{ view::{ArangoSearchViewLink, ArangoSearchViewPropertiesOptions, ViewOptions}, Connection, }; use std::collections::HashMap; const URL: &str = "http://localhost:8529"; #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let collection_name = "test_collection".to_string(); let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; let mut links: HashMap<String, ArangoSearchViewLink> = HashMap::new(); links.insert( collection_name.clone(), ArangoSearchViewLink::builder() .include_all_fields(true) .build(), ); let view = database .create_view( ViewOptions::builder() .name(format!("{}_view", collection_name)) .properties( ArangoSearchViewPropertiesOptions::builder() .links(links) .build(), ) .build(), ) .await?; println!("{:?}", view); database .drop_view(&format!("{}_view", collection_name)) .await?; Ok(()) } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/document.rs
examples/document.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::{document::options::InsertOptions, Collection, Connection}; use arangors::document::{ options::{RemoveOptions, ReplaceOptions, UpdateOptions}, response::DocumentResponse, }; use serde::{Deserialize, Serialize}; use serde_json::json; const URL: &str = "http://localhost:8529"; #[derive(Serialize, Deserialize, Debug)] struct User { first_name: String, last_name: String, email: String, } #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let collection_name = "test_collection_document_example"; let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; database.create_collection(collection_name).await.unwrap(); let collection = database.collection(collection_name).await?; let new_user = User { first_name: "John".to_string(), last_name: "Doe".to_string(), email: "john.doe@who".to_string(), }; // create a document let new_doc_response = collection .create_document(new_user, InsertOptions::builder().return_new(true).build()) .await .unwrap(); let new_doc = new_doc_response.new_doc(); eprintln!( "Your new document should have been created -> {:?} ", new_doc ); // update a document let header = new_doc_response.header().unwrap(); let _key = &header._key; let patch = json!({"last_name" : "Doh"}); let update_doc_response = collection .update_document( _key, patch, UpdateOptions::builder() .return_new(true) .return_old(true) .build(), ) .await .unwrap(); let new_doc = update_doc_response.new_doc(); eprintln!("John Doe is now John Doh -> {:?}", new_doc); let old_doc = update_doc_response.old_doc(); eprintln!("John Doh was called John Doe before -> {:?}", old_doc); // update a document with default option let header = update_doc_response.header().unwrap(); let old_rev = 
&header._rev; let patch = json!({"email" : "john.doh@who"}); // use Default::default() to set default options let update_doc_response = collection .update_document(_key, patch, Default::default()) .await .unwrap(); let header = update_doc_response.header().unwrap(); let _rev = &header._rev; if old_rev != _rev { eprintln!("John Doh has changed his address email"); } // replace a document let replace = User { first_name: "Bob".to_string(), last_name: "Johnson".to_string(), email: "bob.Johnson@internet".to_string(), }; let replace_doc_response = collection .replace_document( _key, replace, ReplaceOptions::builder() .return_new(true) .return_old(true) .build(), Some(_rev.to_string()), ) .await .unwrap(); let new_doc = replace_doc_response.new_doc(); eprintln!( "John Doh found his identity, his real name is Bob Johnson with email \ bob.Johnson@internet@-> {:?}", new_doc ); let old_doc = replace_doc_response.old_doc(); eprintln!( "Bob Johnson was called John Doh because he did not remember who he was -> {:?}", old_doc ); // remove a document let remove_doc_response: DocumentResponse<User> = collection .remove_document( _key, RemoveOptions::builder().return_old(true).build(), None, ) .await .unwrap(); let old_doc = remove_doc_response.old_doc(); eprintln!( "Bob Johnson has been removed from the Database which helps people to remember their \ identity -> {:?}", old_doc ); // cleanup database.drop_collection(collection_name).await.unwrap(); Ok(()) } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/custom_client.rs
examples/custom_client.rs
#![allow(unused_imports)] #![allow(unused_parens)] //! You might want you stick with certain ecosystem like reqwest or actix-web, //! and don't want to mingle two ecosystem, since it significantly increase //! compile time and binary size. //! //! Arangors offers you flexibility to use any HTTP client you like for both //! async and sync implementation. Thanks to maybe_async, we can now unify async //! and sync API. //! //! Several implementations are provided: async `reqwest`, blocking `reqwest`, //! `surf`(async-std) and later `awc`. use anyhow::Error; use arangors::{client::ClientExt, ClientError}; use http::{HeaderMap, HeaderValue, Method}; #[cfg(feature = "reqwest_async")] use reqwest::Client; use url::Url; use arangors::GenericConnection; use std::convert::TryInto; /// when use async http client, `blocking` feature MUST be disabled // This cfg is only to make rust compiler happy in Github Action, you can just ignore it #[cfg(feature = "reqwest_async")] #[derive(Debug, Clone)] pub struct ReqwestClient(pub Client, HeaderMap); /// you can also use macro: maybe_async::async_impl, with which the whole code /// block will just vanish when you enabled `blocking` feature. /// Also, the API of reqwest is almost the same for async and sync. You can also /// use maybe_async::maybe_async to remove async/await keyword in need, and just /// import reqwesat::Client and rewest::blocking::Client respectively in async /// and sync implementation. See `arangors::client::reqwest` source code. 
// This cfg is only to make rust compiler happy in Github Action, you can just ignore it #[cfg(feature = "reqwest_async")] #[async_trait::async_trait] impl ClientExt for ReqwestClient { fn new<U: Into<Option<HeaderMap>>>(headers: U) -> Result<Self, ClientError> { let client = Client::builder().gzip(true); let headers = match headers.into() { Some(h) => h, None => HeaderMap::new(), }; client .build() .map(|c| ReqwestClient(c, headers)) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } fn headers(&mut self) -> &mut HeaderMap<HeaderValue> { &mut self.1 } async fn request( &self, mut request: http::Request<String>, ) -> Result<http::Response<String>, ClientError> { let headers = request.headers_mut(); for (header, value) in self.1.iter() { if !headers.contains_key(header) { headers.insert(header, value.clone()); } } let req = request.try_into().unwrap(); let resp = self .0 .execute(req) .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let status_code = resp.status(); let headers = resp.headers().clone(); let version = resp.version(); let content = resp .text() .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let mut build = http::Response::builder(); for header in headers.iter() { build = build.header(header.0, header.1); } http::response::Builder::from(build) .status(status_code) .version(version) .body(content) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } } // This cfg is only to make rust compiler happy in Github Action, you can just // ignore it #[cfg(feature = "reqwest_async")] #[tokio::main] async fn main() -> Result<(), Error> { const URL: &str = "http://localhost:8529"; let conn = GenericConnection::<ReqwestClient>::establish_jwt(URL, "username", "password").await?; // from here the API is the same let db = conn.db("test_db").await?; let info = db.info().await?; println!("{:?}", info); Ok(()) } #[cfg(not(feature = "reqwest_async"))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/database.rs
examples/database.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::Connection; const URL: &str = "http://localhost:8529"; #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; let collections = database.accessible_collections().await?; println!("{:?}", collections); let collections = database.accessible_collections().await?; println!("{:?}", collections); let info = database.info().await?; println!("{:?}", info); Ok(()) } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/aql.rs
examples/aql.rs
//! Sync and async version share the exactly same API, except that async API //! must be awaited. #![allow(unused_imports)] #![allow(unused_parens)] use std::io::Write; use serde::{Deserialize, Serialize}; use serde_json::value::Value; use arangors::{AqlQuery, Connection}; use std::collections::HashMap; const URL: &str = "http://localhost:8529"; #[derive(Serialize, Deserialize, Debug)] struct User { first_name: String, last_name: String, email: String, } #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() { env_logger::init(); let conn = Connection::establish_jwt(URL, "username", "password") .await .unwrap(); let database = conn.db("test_db").await.unwrap(); let aql = AqlQuery::builder() .query("FOR u IN test_collection LIMIT 3 RETURN u") .build(); println!("{:?}", aql); println!("{:?}", serde_json::to_string(&aql).unwrap()); let resp: Vec<Value> = database.aql_query(aql).await.unwrap(); println!("{:?}", resp); let collection = "test_collection"; let user = User { first_name: "John".to_string(), last_name: "Doe".to_string(), email: "john.doe@who.com".to_string(), }; // use bind_var for any struct that can be converted into serde_json::Value let json = serde_json::to_value(&user).unwrap(); let aql = AqlQuery::builder() .query("INSERT @user INTO @@collection LET result = NEW RETURN result") .bind_var("@collection", collection) .bind_var("user", json) .build(); let result: Vec<User> = database.aql_query(aql).await.unwrap(); println!("{:?}", result); let jane_doe = User { first_name: "Jane".to_string(), last_name: "Doe".to_string(), email: "jane.done@who.com".to_string(), }; // use try_bind for any serializable struct let aql = AqlQuery::builder() .query("INSERT @user INTO @@collection LET result = NEW RETURN result") .bind_var("@collection", collection) .try_bind("user", jane_doe) .unwrap() .build(); let result: Vec<User> = 
database.aql_query(aql).await.unwrap(); println!("{:?}", result); let homer_simpson = User { first_name: "Homer".to_string(), last_name: "Simpson".to_string(), email: "homer.sompson@springfield.com".to_string(), }; let mut map: HashMap<&str, Value> = HashMap::new(); map.insert("@collection", Value::from(collection)); map.insert("user", serde_json::to_value(homer_simpson).unwrap()); // use bind_vars to pass a HashMap of bind variables let aql = AqlQuery::builder() .query("INSERT @user INTO @@collection LET result = NEW RETURN result") .bind_vars(map) .build(); let result: Vec<User> = database.aql_query(aql).await.unwrap(); println!("{:?}", result); } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/collection.rs
examples/collection.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::Connection; const URL: &str = "http://localhost:8529"; #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let collection_name = "test_collection_create_and_drop"; let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; let coll = database.drop_collection(collection_name).await; println!("Should fail: {:?}", coll); let coll = database.create_collection(collection_name).await; println!("{:?}", coll); let collection = database.collection(collection_name).await?; println!("id: {:?}", collection.id()); println!("name: {:?}", collection.name()); println!("collection_type: {:?}", collection.collection_type()); let coll = database.drop_collection(collection_name).await; println!("{:?}", coll); Ok(()) } #[cfg(not(any( feature = "reqwest_blocking", feature = "reqwest_async", feature = "surf_async" )))] fn main() {}
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/transaction.rs
examples/transaction.rs
#![allow(unused_imports)] #![allow(unused_parens)] use anyhow::Error; use arangors::{ transaction::{TransactionCollections, TransactionSettings}, Connection, Document, }; use log::info; use serde_json::{json, Value}; const URL: &str = "http://localhost:8529"; #[cfg_attr(feature = "reqwest_async", tokio::main)] #[cfg_attr(feature = "surf_async", async_std::main)] #[cfg_attr(feature = "reqwest_blocking", maybe_async::must_be_sync)] async fn main() -> Result<(), Error> { let conn = Connection::establish_jwt(URL, "username", "password").await?; let database = conn.db("test_db").await?; let tx = database .begin_transaction( TransactionSettings::builder() .lock_timeout(60000) .wait_for_sync(true) .collections( TransactionCollections::builder() .write(vec!["test_collection".to_owned()]) .build(), ) .build(), ) .await?; let transactions = database.list_transactions().await?; info!("Transactions: {:?}", transactions); let test_doc: Document<Value> = Document::new(json!({ "user_name":"test21", "user_name":"test21_pwd", })); let collection = tx.collection("test_collection").await?; let document = collection .create_document(test_doc, Default::default()) .await?; let header = document.header().unwrap(); let _key = &header._key; info!("Key: {}", _key); tx.abort().await?; Ok(()) }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/reqwest_rustls/src/client.rs
examples/reqwest_rustls/src/client.rs
//! Reqwest HTTP client use std::convert::TryInto; use ::reqwest::Client; use http::header::HeaderMap; use arangors::client::ClientExt; use arangors::transaction::TRANSACTION_HEADER; use arangors::ClientError; #[derive(Debug, Clone)] pub struct ReqwestClient { pub client: Client, pub headers: HeaderMap, } #[async_trait::async_trait] impl ClientExt for ReqwestClient { fn headers(&mut self) -> &mut HeaderMap<HeaderValue> { &mut self.headers } fn new<U: Into<Option<HeaderMap>>>(headers: U) -> Result<Self, ClientError> { let client = Client::builder().gzip(true); let headers = match headers.into() { Some(h) => h, None => HeaderMap::new(), }; client .default_headers(headers.clone()) .build() .map(|c| ReqwestClient { client: c, headers }) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } fn clone_with_transaction(&self, transaction_id: String) -> Result<Self, ClientError> { let mut headers = HeaderMap::new(); for (name, value) in self.headers.iter() { headers.insert(name, value.clone()); } headers.insert(TRANSACTION_HEADER, transaction_id.parse().unwrap()); ReqwestClient::new(headers) } async fn request( &self, request: http::Request<String>, ) -> Result<http::Response<String>, ClientError> { let req = request.try_into().unwrap(); let resp = self .client .execute(req) .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let status_code = resp.status(); let headers = resp.headers().clone(); let version = resp.version(); let content = resp .text() .await .map_err(|e| ClientError::HttpClient(format!("{:?}", e)))?; let mut build = http::Response::builder(); for header in headers.iter() { build = build.header(header.0, header.1); } build .status(status_code) .version(version) .body(content) .map_err(|e| ClientError::HttpClient(format!("{:?}", e))) } }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/reqwest_rustls/src/main.rs
examples/reqwest_rustls/src/main.rs
//! An example to use reqwest with rustls. //! //! 1. use vanilla arangors without any http client implementation by disabling //! `reqwest_async`, `reqwest_blocking` and `surf_async` on arangors. //! 2. implement custom reqwest client and enable `rustls` feature gate like in `src/client.rs`. //! 3. use custom client with `arangors::GenericConnection`. mod client; use arangors::GenericConnection; use self::client::ReqwestClient; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { const URL: &str = "http://localhost:8529"; // use custom Reqwest Client let conn = GenericConnection::<ReqwestClient>::establish_jwt(URL, "username", "password").await?; let db = conn.db("test_db").await?; let info = db.info().await?; println!("{:?}", info); Ok(()) }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
fMeow/arangors
https://github.com/fMeow/arangors/blob/4ee57cfdce34a504d94108dedce5abc11809de87/examples/blocking/src/main.rs
examples/blocking/src/main.rs
// exact same API except that you don't need to await fn main() -> Result<(), Error> { let conn = Connection::establish_jwt(URL, "username", "password")?; let database = conn.db("test_db")?; let collections = database.accessible_collections()?; println!("{:?}", collections); let collections = database.accessible_collections()?; println!("{:?}", collections); let info = database.info()?; println!("{:?}", info); Ok(()) }
rust
MIT
4ee57cfdce34a504d94108dedce5abc11809de87
2026-01-04T20:24:21.210590Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/lib.rs
library/src/lib.rs
/*! This crate provides an easy-to-use wrapper around `WebRTC` and `DataChannels` for a peer to peer connections. # Overview As creator of [agar.io](https://agar.io) famously stated [`WebRTC` is hard](https://news.ycombinator.com/item?id=13264952). This library aims to help, by abstracting away all the setup, and providing a simple way to send and receive messages over the data channel. It's as easy as providing address to a signaling server instance from [accompanying crate](https://docs.rs/wasm-peers-signaling-server/latest/wasm_peers_signaling_server/) and specifying two callbacks. One for when a connection opens, and one for when a message is received. After that you can send messages back and forth without worrying about the implementation details. Library contains three network , [one-to-one](one_to_one), which creates an equal connection between two peers, [one-to-many](one_to_many), which specifies a host and arbitrary number of clients and [many-to-many](many_to_many) that creates connection for each pair of peers and allows sending messages to any of them. 
*/ #![allow( clippy::module_name_repetitions, clippy::future_not_send, // false positive in WASM (single threaded) context )] // clippy WARN level lints #![warn( // missing_docs, clippy::cargo, clippy::pedantic, // clippy::nursery, clippy::dbg_macro, clippy::unwrap_used, clippy::integer_division, clippy::large_include_file, clippy::map_err_ignore, // clippy::missing_docs_in_private_items, clippy::panic, clippy::todo, clippy::undocumented_unsafe_blocks, clippy::unimplemented, clippy::unreachable )] // clippy WARN level lints, that can be upgraded to DENY if preferred #![warn( clippy::float_arithmetic, clippy::arithmetic_side_effects, clippy::modulo_arithmetic, clippy::as_conversions, clippy::assertions_on_result_states, clippy::clone_on_ref_ptr, clippy::create_dir, clippy::default_union_representation, clippy::deref_by_slicing, clippy::empty_drop, clippy::empty_structs_with_brackets, clippy::exit, clippy::filetype_is_file, clippy::float_cmp_const, clippy::if_then_some_else_none, clippy::indexing_slicing, clippy::let_underscore_must_use, clippy::lossy_float_literal, clippy::pattern_type_mismatch, clippy::string_slice, clippy::try_err )] // clippy DENY level lints, they always have a quick fix that should be preferred #![deny( clippy::wildcard_imports, clippy::multiple_inherent_impl, clippy::rc_buffer, clippy::rc_mutex, clippy::rest_pat_in_fully_bound_structs, clippy::same_name_method, clippy::self_named_module_files, clippy::separated_literal_suffix, clippy::shadow_unrelated, clippy::str_to_string, clippy::string_add, clippy::string_to_string, clippy::unnecessary_self_imports, clippy::unneeded_field_pattern, clippy::unseparated_literal_suffix, clippy::verbose_file_reads )] pub(crate) mod constants; mod error; #[cfg(feature = "many-to-many")] pub mod many_to_many; #[cfg(feature = "one-to-many")] pub mod one_to_many; #[cfg(feature = "one-to-one")] pub mod one_to_one; mod utils; pub use error::{Error, Result}; pub use utils::{get_random_session_id, ConnectionType}; pub 
use wasm_peers_protocol::{SessionId, UserId};
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/error.rs
library/src/error.rs
pub type Result<T> = anyhow::Result<T>; pub type Error = anyhow::Error;
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/utils.rs
library/src/utils.rs
use anyhow::anyhow; use js_sys::{Array, Object, Reflect}; use log::debug; use wasm_bindgen::closure::Closure; use wasm_bindgen::{JsCast, JsValue}; use wasm_bindgen_futures::JsFuture; use wasm_peers_protocol::SessionId; use web_sys::{RtcConfiguration, RtcPeerConnection, RtcSdpType, RtcSessionDescriptionInit}; /// Returns a new `SessionId` instance that can be used to identify a session by signaling server. #[must_use] pub fn get_random_session_id() -> SessionId { SessionId::new(uuid::Uuid::new_v4().as_u128()) } /// Specifies what kind of peer connection to create #[derive(Debug, Clone)] pub enum ConnectionType { /// Within local network Local, /// Setup with STUN server, WAN capabilities but can fail Stun { urls: String }, /// Setup with STUN and TURN servers and fallback to TURN if needed, most stable connection StunAndTurn { stun_urls: String, turn_urls: String, username: String, credential: String, }, } pub fn create_peer_connection( connection_type: &ConnectionType, ) -> crate::Result<RtcPeerConnection> { match *connection_type { ConnectionType::Local => RtcPeerConnection::new() .map_err(|err| anyhow!("failed to create RTC peer connection: {:?}", err)), ConnectionType::Stun { ref urls } => { let ice_servers = Array::new(); { let server_entry = Object::new(); Reflect::set(&server_entry, &"urls".into(), &urls.into()).map_err(|err| { anyhow!( "failed to set 'urls' key on turn server entry object: {:?}", err ) })?; ice_servers.push(&server_entry); } let mut rtc_configuration = RtcConfiguration::new(); rtc_configuration.ice_servers(&ice_servers); RtcPeerConnection::new_with_configuration(&rtc_configuration) .map_err(|err| anyhow!("failed to create RTC peer connection: {:?}", err)) } ConnectionType::StunAndTurn { ref stun_urls, ref turn_urls, ref username, ref credential, } => { let ice_servers = Array::new(); { let stun_server_entry = Object::new(); Reflect::set(&stun_server_entry, &"urls".into(), &stun_urls.into()).map_err( |err| { anyhow!( "failed to set 'urls' key 
on turn server entry object: {:?}", err ) }, )?; ice_servers.push(&stun_server_entry); } { let turn_server_entry = Object::new(); Reflect::set(&turn_server_entry, &"urls".into(), &turn_urls.into()).map_err( |err| { anyhow!( "failed to set 'urls' key on turn server entry object: {:?}", err ) }, )?; Reflect::set(&turn_server_entry, &"username".into(), &username.into()).map_err( |err| { anyhow!( "failed to set 'username' key on turn server entry object: {:?}", err ) }, )?; Reflect::set(&turn_server_entry, &"credential".into(), &credential.into()) .map_err(|err| { anyhow!( "failed to set 'credential' key on turn server entry object: {:?}", err ) })?; ice_servers.push(&turn_server_entry); } let mut rtc_configuration = RtcConfiguration::new(); rtc_configuration.ice_servers(&ice_servers); RtcPeerConnection::new_with_configuration(&rtc_configuration) .map_err(|err| anyhow!("failed to create RTC peer connection: {:?}", err)) } } } pub async fn create_sdp_offer(peer_connection: &RtcPeerConnection) -> crate::Result<String> { let offer = JsFuture::from(peer_connection.create_offer()) .await .map_err(|error| { anyhow!( "failed to create an SDP offer: {}", error.as_string().unwrap_or_default() ) })?; let offer = Reflect::get(&offer, &JsValue::from_str("sdp")) .map_err(|err| { anyhow!( "failed to get value for 'sdp' key from offer object: {:?}", err ) })? 
.as_string() .ok_or_else(|| anyhow!("no 'sdp' key in offer object"))?; let mut local_session_description = RtcSessionDescriptionInit::new(RtcSdpType::Offer); local_session_description.sdp(&offer); JsFuture::from(peer_connection.set_local_description(&local_session_description)) .await .map_err(|error| { anyhow!( "failed to set local description: {}", error.as_string().unwrap_or_default() ) })?; Ok(offer) } pub async fn create_sdp_answer( peer_connection: &RtcPeerConnection, offer: String, ) -> crate::Result<String> { let mut remote_session_description = RtcSessionDescriptionInit::new(RtcSdpType::Offer); remote_session_description.sdp(&offer); JsFuture::from(peer_connection.set_remote_description(&remote_session_description)) .await .map_err(|err| anyhow!("failed to set remote session description: {:?}", err))?; let answer = JsFuture::from(peer_connection.create_answer()) .await .map_err(|err| anyhow!("failed to create SDP answer: {:?}", err))?; let answer = Reflect::get(&answer, &JsValue::from_str("sdp")) .map_err(|err| { anyhow!( "failed to get value for 'sdp' key from answer object: {:?}", err ) })? 
.as_string() .ok_or_else(|| anyhow!("failed to represent object value as string"))?; let mut local_session_description = RtcSessionDescriptionInit::new(RtcSdpType::Answer); local_session_description.sdp(&answer); JsFuture::from(peer_connection.set_local_description(&local_session_description)) .await .map_err(|err| anyhow!("failed to set local description: {:?}", err))?; Ok(answer) } pub fn set_peer_connection_on_negotiation_needed(peer_connection: &RtcPeerConnection) { let on_negotiation_needed: Box<dyn FnMut()> = Box::new(move || { debug!("on negotiation needed event occurred"); }); let on_negotiation_needed = Closure::wrap(on_negotiation_needed); peer_connection.set_onnegotiationneeded(Some(on_negotiation_needed.as_ref().unchecked_ref())); on_negotiation_needed.forget(); } pub fn set_peer_connection_on_ice_gathering_state_change(peer_connection: &RtcPeerConnection) { let peer_connection_clone = peer_connection.clone(); let on_ice_gathering_state_change: Box<dyn FnMut()> = Box::new(move || { debug!( "ice gathering state: {:?}", peer_connection_clone.ice_gathering_state() ); }); let on_ice_gathering_state_change = Closure::wrap(on_ice_gathering_state_change); peer_connection.set_onicegatheringstatechange(Some( on_ice_gathering_state_change.as_ref().unchecked_ref(), )); on_ice_gathering_state_change.forget(); } #[cfg(test)] mod test { use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use web_sys::{RtcIceConnectionState, RtcIceGatheringState}; use super::*; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn test_create_stun_peer_connection_is_successful() { let peer_connection = create_peer_connection(&ConnectionType::Local) .expect("creating peer connection failed!"); assert_eq!( peer_connection.ice_connection_state(), RtcIceConnectionState::New ); assert_eq!( peer_connection.ice_gathering_state(), RtcIceGatheringState::New ); } #[wasm_bindgen_test] async fn test_create_sdp_offer_is_successful() { let peer_connection = 
RtcPeerConnection::new().expect("failed to create peer connection"); let _offer = create_sdp_offer(&peer_connection) .await .expect("failed to create SDP offer"); assert!(peer_connection.local_description().is_some()); } #[wasm_bindgen_test] async fn test_create_sdp_answer_is_successful() { let peer_connection = RtcPeerConnection::new().expect("failed to create peer connection"); let offer = create_sdp_offer(&peer_connection) .await .expect("failed to create SDP offer"); let _answer = create_sdp_answer(&peer_connection, offer) .await .expect("failed to create SDP answer"); assert!(peer_connection.local_description().is_some()); assert!(peer_connection.remote_description().is_some()); } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/constants.rs
library/src/constants.rs
pub(crate) const DEFAULT_MAX_RETRANSMITS: u16 = 10;
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_many/callbacks.rs
library/src/one_to_many/callbacks.rs
use js_sys::Uint8Array; use log::{debug, error, info}; use serde::de::DeserializeOwned; use wasm_bindgen::closure::Closure; use wasm_bindgen::{JsCast, JsValue}; use wasm_peers_protocol::one_to_many::SignalMessage; use wasm_peers_protocol::{IceCandidate, SessionId, UserId}; use web_sys::{ MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcPeerConnection, RtcPeerConnectionIceEvent, WebSocket, }; use crate::one_to_many::{websocket_handler, NetworkManager}; /// Also calls: /// * `set_data_channel_on_open` /// * `set_data_channel_on_message` /// * `set_data_channel_on_error` pub fn set_peer_connection_on_data_channel<T: DeserializeOwned>( peer_connection: &RtcPeerConnection, client_id: UserId, network_manager: NetworkManager, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { let on_open_callback_clone = on_open_callback; let on_message_callback_clone = on_message_callback; let on_datachannel: Box<dyn FnMut(RtcDataChannelEvent)> = Box::new(move |data_channel_event: RtcDataChannelEvent| { info!("received data channel"); let data_channel = data_channel_event.channel(); set_data_channel_on_open(&data_channel, client_id, on_open_callback_clone.clone()); set_data_channel_on_error(&data_channel); set_data_channel_on_message( &data_channel, client_id, on_message_callback_clone.clone(), ); if let Some(connection) = network_manager .inner .borrow_mut() .connections .get_mut(&client_id) { connection.data_channel = Some(data_channel); } else { error!("no connection for given id: {}", client_id); } }); let on_datachannel = Closure::wrap(on_datachannel); peer_connection.set_ondatachannel(Some(on_datachannel.as_ref().unchecked_ref())); on_datachannel.forget(); } /// handle message sent by signaling server pub fn set_websocket_on_message<T: DeserializeOwned>( websocket: &WebSocket, network_manager: NetworkManager, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: 
impl FnMut(UserId, T) + Clone + 'static, is_host: bool, ) { let on_message_callback = { let websocket = websocket.clone(); let on_message_callback: Box<dyn FnMut(MessageEvent)> = Box::new(move |ev: MessageEvent| { let Ok(message) = ev.data().dyn_into::<Uint8Array>().map(|v| v.to_vec()) else { error!("failed to convert message to Uint8Array"); return; }; let Ok(message) = rmp_serde::from_slice(message.as_slice()) else { error!("failed to deserialize message"); return; }; let network_manager = network_manager.clone(); let websocket = websocket.clone(); let on_open_callback_clone = on_open_callback.clone(); let on_message_callback_clone = on_message_callback.clone(); wasm_bindgen_futures::spawn_local(async move { if let Err(err) = websocket_handler::handle_websocket_message( network_manager, message, websocket, max_retransmits, on_open_callback_clone, on_message_callback_clone, is_host, ) .await { error!("failed to handle websocket message: {}", err); } }); }); Closure::wrap(on_message_callback) }; websocket.set_onmessage(Some(on_message_callback.as_ref().unchecked_ref())); on_message_callback.forget(); } /// once web socket is open, send a request to start or join a session pub fn set_websocket_on_open(websocket: &WebSocket, session_id: SessionId, is_host: bool) { let on_open_callback = { let websocket = websocket.clone(); let on_open_callback: Box<dyn FnMut(JsValue)> = Box::new(move |_| { let signal_message = SignalMessage::SessionJoin(session_id, is_host); let signal_message = rmp_serde::to_vec(&signal_message).expect("failed serializing SignalMessage"); if let Err(err) = websocket.send_with_u8_array(&signal_message) { error!("failed to send signal message: {:?}", err); } }); Closure::wrap(on_open_callback) }; websocket.set_onopen(Some(on_open_callback.as_ref().unchecked_ref())); on_open_callback.forget(); } pub fn set_data_channel_on_message<T: DeserializeOwned>( data_channel: &RtcDataChannel, client_id: UserId, mut on_message_callback: impl FnMut(UserId, T) + 
'static, ) { let on_message_callback: Box<dyn FnMut(MessageEvent)> = Box::new(move |ev: MessageEvent| { let message = ev.data().dyn_into::<Uint8Array>().ok(); if let Some(message) = message.and_then(|t| rmp_serde::from_slice(&t.to_vec()).ok()) { debug!("message from datachannel (will call on_message)"); on_message_callback(client_id, message); } }); let on_message_callback = Closure::wrap(on_message_callback); data_channel.set_onmessage(Some(on_message_callback.as_ref().unchecked_ref())); on_message_callback.forget(); } pub fn set_data_channel_on_error(data_channel: &RtcDataChannel) { let on_error: Box<dyn FnMut(JsValue)> = Box::new(move |data_channel_error| { error!("data channel error: {:?}", data_channel_error); }); let on_error = Closure::wrap(on_error); data_channel.set_onerror(Some(on_error.as_ref().unchecked_ref())); on_error.forget(); } pub fn set_data_channel_on_open( data_channel: &RtcDataChannel, client_id: UserId, mut on_open_callback: impl FnMut(UserId) + 'static, ) { let on_open_callback: Box<dyn FnMut(JsValue)> = Box::new(move |_| { debug!("data channel is now open, calling on_open!"); on_open_callback(client_id); }); let on_open_callback = Closure::wrap(on_open_callback); data_channel.set_onopen(Some(on_open_callback.as_ref().unchecked_ref())); on_open_callback.forget(); } pub fn set_peer_connection_on_ice_connection_state_change(peer_connection: &RtcPeerConnection) { let peer_connection_clone = peer_connection.clone(); let on_ice_connection_state_change: Box<dyn FnMut()> = Box::new(move || { debug!( "connection state change: {:?}", peer_connection_clone.ice_connection_state() ); }); let on_ice_connection_state_change = Closure::wrap(on_ice_connection_state_change); peer_connection.set_oniceconnectionstatechange(Some( on_ice_connection_state_change.as_ref().unchecked_ref(), )); on_ice_connection_state_change.forget(); } pub fn set_peer_connection_on_ice_candidate( peer_connection: &RtcPeerConnection, client_id: UserId, websocket_clone: WebSocket, 
session_id_clone: SessionId, ) { let on_ice_candidate: Box<dyn FnMut(RtcPeerConnectionIceEvent)> = Box::new(move |ev: RtcPeerConnectionIceEvent| { if let Some(candidate) = ev.candidate() { let signaled_candidate = IceCandidate { candidate: candidate.candidate(), sdp_mid: candidate.sdp_mid(), sdp_m_line_index: candidate.sdp_m_line_index(), }; debug!("signaled candidate: {:#?}", signaled_candidate); let signal_message = SignalMessage::IceCandidate(session_id_clone, client_id, signaled_candidate); let signal_message = rmp_serde::to_vec(&signal_message).expect("failed to serialize SignalMessage"); if let Err(err) = websocket_clone.send_with_u8_array(&signal_message) { error!("failed to send one of the ICE candidates: {:?}", err); } } }); let on_ice_candidate = Closure::wrap(on_ice_candidate); peer_connection.set_onicecandidate(Some(on_ice_candidate.as_ref().unchecked_ref())); on_ice_candidate.forget(); }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_many/websocket_handler.rs
library/src/one_to_many/websocket_handler.rs
use anyhow::anyhow; use log::{debug, error, info}; use serde::de::DeserializeOwned; use wasm_bindgen_futures::JsFuture; use wasm_peers_protocol::one_to_many::SignalMessage; use wasm_peers_protocol::{SessionId, UserId}; use web_sys::{ RtcDataChannelInit, RtcIceCandidate, RtcIceCandidateInit, RtcSdpType, RtcSessionDescriptionInit, WebSocket, }; use crate::one_to_many::callbacks::{ set_data_channel_on_error, set_data_channel_on_message, set_data_channel_on_open, set_peer_connection_on_data_channel, set_peer_connection_on_ice_candidate, set_peer_connection_on_ice_connection_state_change, }; use crate::one_to_many::{Connection, NetworkManager}; use crate::utils::{ create_peer_connection, create_sdp_answer, create_sdp_offer, set_peer_connection_on_ice_gathering_state_change, set_peer_connection_on_negotiation_needed, }; /// Basically a finite state machine spread across host, client and signaling server /// handling each step in session and then `WebRTC` setup. pub async fn handle_websocket_message<T: DeserializeOwned>( network_manager: NetworkManager, message: SignalMessage, websocket: WebSocket, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, is_host: bool, ) -> crate::Result<()> { match message { SignalMessage::SessionJoin(_session_id, _peer_id) => { error!("error, SessionStartOrJoin should only be sent by peers to signaling server"); } SignalMessage::SessionReady(session_id, peer_id) => { session_ready( network_manager, websocket, max_retransmits, on_open_callback, on_message_callback, is_host, session_id, peer_id, ) .await?; } SignalMessage::SdpOffer(session_id, peer_id, offer) => { sdp_offer( network_manager, websocket, on_open_callback, on_message_callback, is_host, session_id, peer_id, offer, ) .await?; } SignalMessage::SdpAnswer(session_id, user_id, answer) => { let peer_connection = network_manager .inner .borrow() .connections .get(&user_id) .map(Clone::clone) 
.map(|connection| connection.peer_connection) .ok_or_else(|| { anyhow!( "(is_host: {}) no connection to send answer for given user_id: {:?}", is_host, &user_id ) })?; let mut remote_session_description = RtcSessionDescriptionInit::new(RtcSdpType::Answer); remote_session_description.sdp(&answer); JsFuture::from(peer_connection.set_remote_description(&remote_session_description)) .await .expect("failed to set remote description"); debug!( "received answer from peer and set remote description: {}, {:?}", answer, session_id ); } SignalMessage::IceCandidate(_session_id, user_id, ice_candidate) => { let peer_connection = network_manager .inner .borrow() .connections .get(&user_id) .map(Clone::clone) .map(|connection| connection.peer_connection) .ok_or_else(|| { anyhow!( "no connection to send ice candidate to for given user_id: {:?}", &user_id ) })?; debug!("peer received ice candidate: {:?}", &ice_candidate); let mut rtc_candidate = RtcIceCandidateInit::new(""); rtc_candidate.candidate(&ice_candidate.candidate); rtc_candidate.sdp_m_line_index(ice_candidate.sdp_m_line_index); rtc_candidate.sdp_mid(ice_candidate.sdp_mid.as_deref()); let rtc_candidate = RtcIceCandidate::new(&rtc_candidate) .map_err(|err| anyhow!("failed to create RTC ICE candidate: {:?}", err))?; JsFuture::from( peer_connection.add_ice_candidate_with_opt_rtc_ice_candidate(Some(&rtc_candidate)), ) .await .map_err(|err| { anyhow!( "failed to add ice candidate with optional RTC ICE candidate: {:?}", err ) })?; debug!("added ice candidate {:?}", ice_candidate); } SignalMessage::Error(session_id, user_id, error) => { error!( "signaling server returned error: session id: {session_id:?}, user_id: \ {user_id:?}, error: {error}", ); } } Ok(()) } #[allow(clippy::too_many_arguments)] async fn session_ready<T: DeserializeOwned>( network_manager: NetworkManager, websocket: WebSocket, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 
'static, is_host: bool, session_id: SessionId, peer_id: UserId, ) -> crate::Result<()> { info!( "peer received info that session with {:?} is ready {:?}", peer_id, session_id ); let peer_connection = create_peer_connection(&network_manager.inner.borrow().connection_type)?; set_peer_connection_on_data_channel( &peer_connection, peer_id, network_manager.clone(), on_open_callback.clone(), on_message_callback.clone(), ); set_peer_connection_on_ice_candidate(&peer_connection, peer_id, websocket.clone(), session_id); set_peer_connection_on_ice_connection_state_change(&peer_connection); set_peer_connection_on_ice_gathering_state_change(&peer_connection); set_peer_connection_on_negotiation_needed(&peer_connection); let mut init = RtcDataChannelInit::new(); init.max_retransmits(max_retransmits); init.ordered(false); let data_channel = peer_connection .create_data_channel_with_data_channel_dict(&format!("{}-{}", session_id, peer_id), &init); set_data_channel_on_open(&data_channel, peer_id, on_open_callback.clone()); set_data_channel_on_error(&data_channel); set_data_channel_on_message(&data_channel, peer_id, on_message_callback.clone()); let offer = create_sdp_offer(&peer_connection).await?; let signal_message = SignalMessage::SdpOffer(session_id, peer_id, offer); let signal_message = rmp_serde::to_vec(&signal_message)?; websocket .send_with_u8_array(&signal_message) .map_err(|err| anyhow!("failed to send message across the websocket: {:?}", err))?; network_manager.inner.borrow_mut().connections.insert( peer_id, Connection::new(peer_connection.clone(), Some(data_channel.clone())), ); debug!( "(is_host: {}) sent an offer to {:?} successfully", is_host, peer_id ); Ok(()) } #[allow(clippy::too_many_arguments)] async fn sdp_offer<T: DeserializeOwned>( network_manager: NetworkManager, websocket: WebSocket, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, is_host: bool, session_id: SessionId, peer_id: UserId, 
offer: String, ) -> crate::Result<()> { // non-host peer received an offer let peer_connection = create_peer_connection(&network_manager.inner.borrow().connection_type)?; set_peer_connection_on_data_channel( &peer_connection, peer_id, network_manager.clone(), on_open_callback.clone(), on_message_callback.clone(), ); set_peer_connection_on_ice_candidate(&peer_connection, peer_id, websocket.clone(), session_id); set_peer_connection_on_ice_connection_state_change(&peer_connection); set_peer_connection_on_ice_gathering_state_change(&peer_connection); set_peer_connection_on_negotiation_needed(&peer_connection); network_manager .inner .borrow_mut() .connections .insert(peer_id, Connection::new(peer_connection.clone(), None)); debug!( "(is_host: {}) added connection for {:?} successfully", is_host, peer_id ); let answer = create_sdp_answer(&peer_connection, offer) .await .expect("failed to create SDP answer"); debug!( "received an offer from {:?} and created an answer: {}", peer_id, answer ); let signal_message = SignalMessage::SdpAnswer(session_id, peer_id, answer); let signal_message = rmp_serde::to_vec(&signal_message).expect("failed to serialize SignalMessage"); websocket .send_with_u8_array(&signal_message) .expect("failed to send SPD answer to signaling server"); Ok(()) } // #[cfg(test)] // mod test { // use super::*; // use mockall::mock; // use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; // use web_sys::{RtcIceConnectionState, RtcIceGatheringState}; // use wasm_peers_protocol::SessionId; // // wasm_bindgen_test_configure!(run_in_browser); // // mock! { // WebSocket {} // } // // // #[wasm_bindgen_test] // async fn test_handle_session_ready_signal_is_successful() { // let message = SignalMessage::SessionReady(SessionId::new("dummy-session-id".to_string()), true); // let peer_connection = RtcPeerConnection::new().unwrap(); // // // TODO(tkarwowski): this should be mocked, but how do you pass a mock to a function expecting different type? 
// // I could introduce a trait, implement it for web_sys::WebSocket and MockWebSocket as well, // // but that's a lot of work... // // This is a integration test for now. // let websocket = WebSocket::new("ws://0.0.0.0:9001/ws") // .expect("local signaling server instance was not found"); // websocket.set_binary_type(web_sys::BinaryType::Arraybuffer); // // // FIXME(tkarwowski): this fails because peer_connection state gets modified in other tests // handle_websocket_message(message, peer_connection.clone(), websocket) // .await // .unwrap(); // assert!(peer_connection.local_description().is_some()); // } // }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_many/mod.rs
library/src/one_to_many/mod.rs
/*! Library module for the one-to-many topology in client-server architecture. There can be exactly one instance of [`MiniServer`] and arbitrary number of [`MiniClient`]'s connected to the same session. A [`RtcPeerConnection`] with an accompanying [`RtcDataChannel`] will be established between the [`MiniServer`] and each of the [`MiniClient`]'s. [`MiniServer`] can decide whether to send a message to a single peer, identified by [`UserId`] returned by signaling server during connection establishment method, with [`MiniServer::send_message`], or to fire to all clients with [`MiniServer::send_message_to_all`]. [`MiniClient`] only has an option to message the host with [`MiniClient::send_message_to_host`]. # Example This example shows three peers connecting, with one being a dedicated host. Host waits for both peers to connect and only then sends `ping` messages to both and clients independently respond with `pong` messages. ``` use wasm_peers::one_to_many::{MiniClient, MiniServer}; use wasm_peers::ConnectionType; use std::cell::RefCell; use std::rc::Rc; use wasm_peers_protocol::SessionId; use web_sys::console; const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-many"; const STUN_SERVER_URL: &str = "stun:openrelay.metered.ca:80"; let mut server = MiniServer::new( SIGNALING_SERVER_URL, SessionId::new(1), ConnectionType::Stun { urls: STUN_SERVER_URL.to_string() }, ) .unwrap(); let server_open_connections_count = Rc::new(RefCell::new(0)); let server_clone = server.clone(); let server_on_open = { let server_open_connections_count = server_open_connections_count.clone(); move |user_id| { console::log_1(&format!("connection to user established: {:?}", user_id).into()); *server_open_connections_count.borrow_mut() += 1; if *server_open_connections_count.borrow() == 2 { server_clone.send_message_to_all("ping!"); } } }; let server_on_message = { move |user_id, message: &str| { console::log_1( &format!( "server received message from client {:?}: {}", user_id, message ) 
.into(), ); } }; server.start(server_on_open, server_on_message); let client_generator = || { let mut client = MiniClient::new( SIGNALING_SERVER_URL, SessionId::new(1), ConnectionType::Stun { urls: STUN_SERVER_URL.to_string() }, ) .unwrap(); let client_on_open = || { /* do nothing */ }; let client_clone = client.clone(); let client_on_message = { move |message| { console::log_1(&format!("client received message: {}", message).into()); client_clone.send_message_to_host("pong!").unwrap(); } }; client.start(client_on_open, client_on_message); }; client_generator(); client_generator(); ``` */ mod callbacks; mod websocket_handler; use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; use anyhow::anyhow; use serde::de::DeserializeOwned; use serde::Serialize; use wasm_peers_protocol::{SessionId, UserId}; use web_sys::{RtcDataChannel, RtcPeerConnection, WebSocket}; use crate::constants::DEFAULT_MAX_RETRANSMITS; use crate::one_to_many::callbacks::{set_websocket_on_message, set_websocket_on_open}; use crate::ConnectionType; #[derive(Debug, Clone)] struct Connection { peer_connection: RtcPeerConnection, data_channel: Option<RtcDataChannel>, } impl Connection { const fn new(peer_connection: RtcPeerConnection, data_channel: Option<RtcDataChannel>) -> Self { Self { peer_connection, data_channel, } } } #[derive(Debug)] struct NetworkManagerInner { session_id: SessionId, websocket: WebSocket, connection_type: ConnectionType, is_host: bool, connections: HashMap<UserId, Connection>, } #[derive(Debug, Clone)] pub struct NetworkManager { inner: Rc<RefCell<NetworkManagerInner>>, } impl NetworkManager { /// Creates an instance with all resources required to create a connection. /// Requires an address of an signaling server instance, /// session id by which it will identify connecting pair of peers and type of connection. /// /// # Errors /// This function errs if opening a `WebSocket` connection to URL provided by `signaling_server_url` fails. 
pub fn new( signaling_server_url: &str, session_id: SessionId, connection_type: ConnectionType, is_host: bool, ) -> crate::Result<Self> { let websocket = WebSocket::new(signaling_server_url).map_err(|err| { anyhow!( "failed to create connection with signaling server on {}: {:?}", signaling_server_url, err ) })?; websocket.set_binary_type(web_sys::BinaryType::Arraybuffer); Ok(Self { inner: Rc::new(RefCell::new(NetworkManagerInner { session_id, websocket, connection_type, is_host, connections: HashMap::new(), })), }) } pub fn start<T: DeserializeOwned>( &mut self, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { self.start_with_retransmits( DEFAULT_MAX_RETRANSMITS, on_open_callback, on_message_callback, ); } pub fn start_with_retransmits<T: DeserializeOwned>( &mut self, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { let websocket = self.inner.borrow().websocket.clone(); let session_id = self.inner.borrow().session_id; let is_host = self.inner.borrow().is_host; set_websocket_on_open(&websocket, session_id, is_host); set_websocket_on_message( &websocket, self.clone(), max_retransmits, on_open_callback, on_message_callback, is_host, ); } /// Send message to a connected client-user identified by unique [`UserId`] /// /// # Errors /// This function can err if: /// - sending of the message was tried before data channel was established or, /// - sending of the message failed. pub fn send_message<T: Serialize + ?Sized>( &self, user_id: UserId, message: &T, ) -> crate::Result<()> { let message = rmp_serde::to_vec(message)?; self.inner .borrow() .connections .get(&user_id) .ok_or_else(|| anyhow!("no connection for user {}", user_id))? .data_channel .as_ref() .ok_or_else(|| anyhow!("no data channel setup yet for user {}", user_id))? 
.send_with_u8_array(&message) .map_err(|err| anyhow!("failed to send string: {:?}", err)) } /// Send message to a all connected client-users. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message_to_all<T: Serialize + ?Sized>(&self, message: &T) -> crate::Result<()> { let message = rmp_serde::to_vec(message)?; for data_channel in self .inner .borrow() .connections .values() .filter_map(|connection| connection.data_channel.as_ref()) { // TODO(tkarwowski): some may fail, should we return a list results? let _result = data_channel.send_with_u8_array(&message); } Ok(()) } } /// Abstraction over `WebRTC` peer-to-peer connection. /// Structure representing server in client-server topology. /// /// `WebRTC` data channel communication abstracted to a single class. /// All setup is handled internally, you must only provide callbacks /// for when the connection opens and for handling incoming messages. /// It also provides a method of sending data to the other end of the connection. /// /// Only works with [wasm-peers-signaling-server](https://docs.rs/wasm-peers-signaling-server/latest/wasm_peers_signaling_server/) instance, /// whose full address must be provided. /// /// Start-up flow is divided into two methods [`MiniServer::new`] and [`MiniServer::start`] /// to allow possibility of referring to network manger itself from the callbacks. /// /// This class is a pointer to the underlying resource and can be cloned freely. #[derive(Debug, Clone)] pub struct MiniServer { inner: NetworkManager, } impl MiniServer { /// Creates an instance with all resources required to create a connections to client-peers. 
/// Requires an address of an signaling server instance, /// session id by which it will identify connecting pair of peers and type of connection. /// /// # Errors /// This function errs if opening a `WebSocket` connection to URL provided by `signaling_server_url` fails. pub fn new( signaling_server_url: &str, session_id: SessionId, connection_type: ConnectionType, ) -> crate::Result<Self> { Ok(Self { inner: NetworkManager::new(signaling_server_url, session_id, connection_type, true)?, }) } /// Second part of the setup that begins the actual connection. /// Requires specifying a callbacks that are guaranteed to run /// when the connection opens and on each message received. /// It takes [`UserId`] as an argument which helps identify which client-peer. pub fn start<T: DeserializeOwned>( &mut self, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { self.inner.start(on_open_callback, on_message_callback); } pub fn start_with_retransmits<T: DeserializeOwned>( &mut self, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { self.inner .start_with_retransmits(max_retransmits, on_open_callback, on_message_callback); } /// Sends message over established data channel with a single client-peer represented by /// the [`UserId`] returned by signaling server during connection establishment. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message<T: Serialize + ?Sized>( &self, user_id: UserId, message: &T, ) -> crate::Result<()> { self.inner.send_message(user_id, message) } /// Convenience function that sends the same message to all connected client-peers. 
/// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message_to_all<T: Serialize + ?Sized>(&self, message: &T) -> crate::Result<()> { self.inner.send_message_to_all(message) } } /// Abstraction over `WebRTC` peer-to-peer connection. /// Same as [`MiniServer`], but representing clients in client-server topology. #[derive(Debug, Clone)] pub struct MiniClient { inner: NetworkManager, } impl MiniClient { /// Same as [`MiniServer::new`] /// /// # Errors /// This function errs if opening a `WebSocket` connection to URL provided by `signaling_server_url` fails. pub fn new( signaling_server_url: &str, session_id: SessionId, connection_type: ConnectionType, ) -> crate::Result<Self> { Ok(Self { inner: NetworkManager::new(signaling_server_url, session_id, connection_type, false)?, }) } pub fn start<T: DeserializeOwned>( &mut self, mut on_open_callback: impl FnMut() + Clone + 'static, mut on_message_callback: impl FnMut(T) + Clone + 'static, ) { let on_open_callback = move |_| on_open_callback(); let on_message_callback = move |_, message| on_message_callback(message); self.inner.start(on_open_callback, on_message_callback); } /// Same as [`MiniServer::start`], but callbacks don't take `UserId` argument, as it will always be host. 
pub fn start_with_retransmits<T: DeserializeOwned>( &mut self, max_retransmits: u16, mut on_open_callback: impl FnMut() + Clone + 'static, mut on_message_callback: impl FnMut(T) + Clone + 'static, ) { let on_open_callback = move |_| on_open_callback(); let on_message_callback = move |_, message| on_message_callback(message); self.inner .start_with_retransmits(max_retransmits, on_open_callback, on_message_callback); } /// Way of communicating with peer-server /// Send message to the other end of the connection. /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message_to_host<T: Serialize + ?Sized>(&self, message: &T) -> crate::Result<()> { self.inner.send_message_to_all(message) } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_one/callbacks.rs
library/src/one_to_one/callbacks.rs
use js_sys::Uint8Array; use log::{debug, error, info}; use serde::de::DeserializeOwned; use wasm_bindgen::closure::Closure; use wasm_bindgen::{JsCast, JsValue}; use wasm_peers_protocol::one_to_one::SignalMessage; use wasm_peers_protocol::{IceCandidate, SessionId}; use web_sys::{ MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcPeerConnection, RtcPeerConnectionIceEvent, WebSocket, }; use crate::one_to_one::{websocket_handler, NetworkManager}; /// also calls: /// * `set_data_channel_on_open` /// * `set_data_channel_on_message` /// * `set_data_channel_on_error` pub fn set_peer_connection_on_data_channel<T: DeserializeOwned>( peer_connection: &RtcPeerConnection, network_manager: NetworkManager, on_open_callback: impl FnMut() + Clone + 'static, on_message_callback: impl FnMut(T) + Clone + 'static, ) { let on_datachannel: Box<dyn FnMut(RtcDataChannelEvent)> = Box::new(move |data_channel_event: RtcDataChannelEvent| { info!("received data channel"); let data_channel = data_channel_event.channel(); set_data_channel_on_open(&data_channel, on_open_callback.clone()); set_data_channel_on_error(&data_channel); set_data_channel_on_message(&data_channel, on_message_callback.clone()); network_manager.inner.borrow_mut().data_channel = Some(data_channel); }); let on_datachannel = Closure::wrap(on_datachannel); peer_connection.set_ondatachannel(Some(on_datachannel.as_ref().unchecked_ref())); on_datachannel.forget(); } /// handle message sent by signaling server pub fn set_websocket_on_message(websocket: &WebSocket, peer_connection: RtcPeerConnection) { { let on_message_callback = { let websocket = websocket.clone(); let peer_connection = peer_connection; let on_message_callback: Box<dyn FnMut(MessageEvent)> = Box::new(move |ev: MessageEvent| { let Ok(message) = ev.data().dyn_into::<Uint8Array>().map(|v| v.to_vec()) else { error!("failed to convert message to Uint8Array"); return; }; let Ok(message) = rmp_serde::from_slice(message.as_slice()) else { error!("failed to deserialize 
message"); return; }; let websocket_clone = websocket.clone(); let peer_connection_clone = peer_connection.clone(); wasm_bindgen_futures::spawn_local(async move { if let Err(err) = websocket_handler::handle_websocket_message( message, peer_connection_clone, websocket_clone, ) .await { error!("error handling websocket message: {:?}", err); } }); }); Closure::wrap(on_message_callback) }; websocket.set_onmessage(Some(on_message_callback.as_ref().unchecked_ref())); on_message_callback.forget(); } } /// once web socket is open, send a request to start or join a session pub fn set_websocket_on_open(websocket: &WebSocket, session_id: SessionId) { { let websocket_clone = websocket.clone(); let on_open_callback: Box<dyn FnMut(JsValue)> = Box::new(move |_| { let signal_message = SignalMessage::SessionJoin(session_id); let signal_message = rmp_serde::to_vec(&signal_message).expect("failed serializing SignalMessage"); websocket_clone .send_with_u8_array(&signal_message) .expect("failed sending start-or-join message to the websocket"); }); let on_open_callback = Closure::wrap(on_open_callback); websocket.set_onopen(Some(on_open_callback.as_ref().unchecked_ref())); on_open_callback.forget(); } } pub fn set_data_channel_on_message<T: DeserializeOwned>( data_channel: &RtcDataChannel, mut on_message_callback: impl FnMut(T) + 'static, ) { let on_message_callback: Box<dyn FnMut(MessageEvent)> = Box::new(move |ev: MessageEvent| { let message = ev.data().dyn_into::<Uint8Array>().ok(); if let Some(message) = message.and_then(|t| rmp_serde::from_slice(&t.to_vec()).ok()) { debug!("message from datachannel (will call on_message)"); on_message_callback(message); } }); let on_message_callback = Closure::wrap(on_message_callback); data_channel.set_onmessage(Some(on_message_callback.as_ref().unchecked_ref())); on_message_callback.forget(); } pub fn set_data_channel_on_error(data_channel: &RtcDataChannel) { let on_error: Box<dyn FnMut(JsValue)> = Box::new(move |data_channel_error| { 
error!("data channel error: {:?}", data_channel_error); }); let on_error = Closure::wrap(on_error); data_channel.set_onerror(Some(on_error.as_ref().unchecked_ref())); on_error.forget(); } pub fn set_data_channel_on_open( data_channel: &RtcDataChannel, mut on_open_callback: impl FnMut() + 'static, ) { let on_open_callback: Box<dyn FnMut(JsValue)> = Box::new(move |_| { debug!("data channel is now open, calling on_open!"); on_open_callback(); }); let on_open_callback = Closure::wrap(on_open_callback); data_channel.set_onopen(Some(on_open_callback.as_ref().unchecked_ref())); on_open_callback.forget(); } pub fn set_peer_connection_on_ice_connection_state_change(peer_connection: &RtcPeerConnection) { let peer_connection_clone = peer_connection.clone(); let on_ice_connection_state_change: Box<dyn FnMut()> = Box::new(move || { debug!( "connection state change: {:?}", peer_connection_clone.ice_connection_state() ); }); let on_ice_connection_state_change = Closure::wrap(on_ice_connection_state_change); peer_connection.set_oniceconnectionstatechange(Some( on_ice_connection_state_change.as_ref().unchecked_ref(), )); on_ice_connection_state_change.forget(); } pub fn set_peer_connection_on_ice_candidate( peer_connection: &RtcPeerConnection, websocket: WebSocket, session_id: SessionId, ) { let on_ice_candidate: Box<dyn FnMut(RtcPeerConnectionIceEvent)> = Box::new(move |ev: RtcPeerConnectionIceEvent| { if let Some(candidate) = ev.candidate() { let signaled_candidate = IceCandidate { candidate: candidate.candidate(), sdp_mid: candidate.sdp_mid(), sdp_m_line_index: candidate.sdp_m_line_index(), }; debug!("signaled candidate: {:#?}", signaled_candidate); let signal_message = SignalMessage::IceCandidate(session_id, signaled_candidate); let signal_message = rmp_serde::to_vec(&signal_message).expect("failed to serialize SignalMessage"); websocket .send_with_u8_array(&signal_message) .unwrap_or_else(|_| error!("failed to send one of the ICE candidates")); } }); let on_ice_candidate = 
Closure::wrap(on_ice_candidate); peer_connection.set_onicecandidate(Some(on_ice_candidate.as_ref().unchecked_ref())); on_ice_candidate.forget(); }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_one/websocket_handler.rs
library/src/one_to_one/websocket_handler.rs
use ::log::{debug, error, info}; use anyhow::anyhow; use wasm_bindgen_futures::JsFuture; use wasm_peers_protocol::one_to_one::SignalMessage; use web_sys::{ RtcIceCandidate, RtcIceCandidateInit, RtcPeerConnection, RtcSdpType, RtcSessionDescriptionInit, WebSocket, }; use crate::utils::{create_sdp_answer, create_sdp_offer}; /// Basically a state spread across host, client and signaling server, /// handling each step in session and then `WebRTC` setup. pub async fn handle_websocket_message( message: SignalMessage, peer_connection: RtcPeerConnection, websocket: WebSocket, ) -> crate::Result<()> { match message { SignalMessage::SessionJoin(_session_id) => { error!("error, SessionStartOrJoin should only be sent by peers to signaling server"); } SignalMessage::SessionReady(session_id, is_host) => { info!("peer received info that session is ready {:?}", session_id); if is_host { let offer = create_sdp_offer(&peer_connection).await?; let signal_message = SignalMessage::SdpOffer(session_id, offer); let signal_message = rmp_serde::to_vec(&signal_message)?; websocket .send_with_u8_array(&signal_message) .map_err(|err| { anyhow!("failed to send message across the websocket: {:?}", err) })?; debug!("(is_host: {}) sent an offer successfully", is_host); } } SignalMessage::SdpOffer(session_id, offer) => { let answer = create_sdp_answer(&peer_connection, offer).await?; debug!("received an offer and created an answer: {}", answer); let signal_message = SignalMessage::SdpAnswer(session_id, answer); let signal_message = rmp_serde::to_vec(&signal_message)?; if let Err(err) = websocket.send_with_u8_array(&signal_message) { error!("failed to send signal message: {:?}", err); } } SignalMessage::SdpAnswer(session_id, answer) => { let mut remote_session_description = RtcSessionDescriptionInit::new(RtcSdpType::Answer); remote_session_description.sdp(&answer); JsFuture::from(peer_connection.set_remote_description(&remote_session_description)) .await .expect("failed to set remote descripiton"); 
debug!( "received answer from peer and set remote description: {}, {:?}", answer, session_id ); } SignalMessage::IceCandidate(_session_id, ice_candidate) => { debug!("peer received ice candidate: {:?}", &ice_candidate); let mut rtc_candidate = RtcIceCandidateInit::new(""); rtc_candidate.candidate(&ice_candidate.candidate); rtc_candidate.sdp_m_line_index(ice_candidate.sdp_m_line_index); rtc_candidate.sdp_mid(ice_candidate.sdp_mid.as_deref()); let rtc_candidate = RtcIceCandidate::new(&rtc_candidate).expect("failed to create new RtcIceCandidate"); JsFuture::from( peer_connection.add_ice_candidate_with_opt_rtc_ice_candidate(Some(&rtc_candidate)), ) .await .expect("failed to add ICE candidate"); debug!("added ice candidate {:?}", ice_candidate); } SignalMessage::Error(session_id, error) => { error!( "signaling server returned error: session id: {:?}, error:{}", session_id, error ); } } Ok(()) } // // TODO(tkarwowski): uncomment once mocks work // #[cfg(test)] // mod test { // use super::*; // use mockall::mock; // use wasm_bindgen_test::wasm_bindgen_test_configure; // // use wasm_peers_protocol::SessionId; // // wasm_bindgen_test_configure!(run_in_browser); // // mock! { // WebSocket {} // } // // #[wasm_bindgen_test] // async fn test_handle_session_ready_signal_is_successful() { // let message = // SignalMessage::SessionReady(SessionId::new("dummy-session-id".to_string()), true); // let peer_connection = RtcPeerConnection::new().unwrap(); // // // TODO(tkarwowski): this should be mocked, but how do you pass a mock to a function expecting different type? // // I could introduce a trait, implement it for web_sys::WebSocket and MockWebSocket as well, // // but that's a lot of work... // // This is a integration test for now. 
// let websocket = WebSocket::new("ws://0.0.0.0:9001/ws") // .expect("local signaling server instance was not found"); // websocket.set_binary_type(web_sys::BinaryType::Arraybuffer); // // // FIXME(tkarwowski): this fails because peer_connection state gets modified in other tests // handle_websocket_message(message, peer_connection.clone(), websocket) // .await // .unwrap(); // assert!(peer_connection.local_description().is_some()); // } // }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/one_to_one/mod.rs
library/src/one_to_one/mod.rs
/*! Library module for one-to-one network topology in simple tunnel connection. After connection is established both peers are treated equally and have an opportunity to send messages with [`NetworkManager::send_message`] method. # Example This example shows two peers sending `ping` and `pong` messages to each other. ``` use wasm_peers::{ConnectionType, SessionId}; use wasm_peers::one_to_one::NetworkManager; use web_sys::console; const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-one"; const STUN_SERVER_URL: &str = "stun:openrelay.metered.ca:80"; let session_id = SessionId::new(1); let mut peer1 = NetworkManager::new( SIGNALING_SERVER_URL, session_id.clone(), &ConnectionType::Stun { urls: STUN_SERVER_URL.to_string() }, ) .unwrap(); let peer1_clone = peer1.clone(); let peer1_on_open = move || peer1_clone.send_message("ping!").unwrap(); let peer1_on_message = { move |message| { console::log_1(&format!("peer1 received message: {}", message).into()); } }; peer1.start(peer1_on_open, peer1_on_message); let mut peer2 = NetworkManager::new( SIGNALING_SERVER_URL, session_id, &ConnectionType::Stun { urls: STUN_SERVER_URL.to_string() }, ) .unwrap(); let peer2_on_open = || { /* do nothing */ }; let peer2_clone = peer2.clone(); let peer2_on_message = { move |message| { console::log_1(&format!("peer2 received message: {}", message).into()); peer2_clone.send_message("pong!").unwrap(); } }; peer2.start(peer2_on_open, peer2_on_message); ``` */ use std::cell::RefCell; use std::rc::Rc; use anyhow::anyhow; use log::debug; use serde::de::DeserializeOwned; use serde::Serialize; use wasm_peers_protocol::SessionId; use web_sys::{RtcDataChannel, RtcDataChannelInit, RtcPeerConnection, WebSocket}; use crate::constants::DEFAULT_MAX_RETRANSMITS; use crate::one_to_one::callbacks::{ set_data_channel_on_error, set_data_channel_on_message, set_data_channel_on_open, set_peer_connection_on_data_channel, set_peer_connection_on_ice_candidate, 
set_peer_connection_on_ice_connection_state_change, set_websocket_on_message, set_websocket_on_open, }; use crate::utils::{ create_peer_connection, set_peer_connection_on_ice_gathering_state_change, set_peer_connection_on_negotiation_needed, ConnectionType, }; mod callbacks; mod websocket_handler; #[derive(Debug, Clone)] pub struct NetworkManagerInner { session_id: SessionId, websocket: WebSocket, peer_connection: RtcPeerConnection, pub data_channel: Option<RtcDataChannel>, } /// Abstraction over `WebRTC` peer-to-peer connection. /// Structure representing one of two equal peers. /// /// `WebRTC` data channel communication abstracted to a single class. /// All setup is handled internally, you must only provide callbacks /// for when the connection opens and for handling incoming messages. /// It also provides a method of sending data to the other end of the connection. /// /// Only works with [wasm-peers-signaling-server](https://docs.rs/wasm-peers-signaling-server/latest/wasm_peers_signaling_server/) instance, /// whose full address must be provided. /// /// Start-up flow is divided into two methods [`NetworkManager::new`] and [`NetworkManager::start`] /// to allow possibility of referring to network manger itself from the callbacks. /// /// This class is a pointer to the underlying resource and can be cloned freely. #[derive(Debug, Clone)] pub struct NetworkManager { pub inner: Rc<RefCell<NetworkManagerInner>>, } impl NetworkManager { /// Creates an instance with all resources required to create a connection. /// Requires an address of an signaling server instance, /// session id by which it will identify connecting pair of peers and type of connection. /// /// # Errors /// This function errs if opening a `WebSocket` connection to URL provided by `signaling_server_url` fails. 
pub fn new( signaling_server_url: &str, session_id: SessionId, connection_type: &ConnectionType, ) -> crate::Result<Self> { let peer_connection = create_peer_connection(connection_type)?; let websocket = WebSocket::new(signaling_server_url).map_err(|err| { anyhow!( "failed to create connection with signaling server on {}: {:?}", signaling_server_url, err ) })?; websocket.set_binary_type(web_sys::BinaryType::Arraybuffer); Ok(Self { inner: Rc::new(RefCell::new(NetworkManagerInner { session_id, websocket, peer_connection, data_channel: None, })), }) } /// Second part of the setup that begins the actual connection. /// Requires specifying a callbacks that are guaranteed to run /// when the connection opens and on each message received. pub fn start<T: DeserializeOwned>( &mut self, on_open_callback: impl FnMut() + Clone + 'static, on_message_callback: impl FnMut(T) + Clone + 'static, ) { self.start_with_retransmits( DEFAULT_MAX_RETRANSMITS, on_open_callback, on_message_callback, ); } pub fn start_with_retransmits<T: DeserializeOwned>( &mut self, max_retransmits: u16, on_open_callback: impl FnMut() + Clone + 'static, on_message_callback: impl FnMut(T) + Clone + 'static, ) { let NetworkManagerInner { websocket, peer_connection, session_id, .. 
} = self.inner.borrow().clone(); let mut init = RtcDataChannelInit::new(); init.max_retransmits(max_retransmits); init.ordered(false); let data_channel = peer_connection .create_data_channel_with_data_channel_dict(&session_id.to_string(), &init); debug!( "data_channel created with label: {:?}", data_channel.label() ); set_data_channel_on_open(&data_channel, on_open_callback.clone()); set_data_channel_on_error(&data_channel); set_data_channel_on_message(&data_channel, on_message_callback.clone()); self.inner.borrow_mut().data_channel = Some(data_channel); set_peer_connection_on_data_channel( &peer_connection, self.clone(), on_open_callback, on_message_callback, ); set_peer_connection_on_ice_candidate(&peer_connection, websocket.clone(), session_id); set_peer_connection_on_ice_connection_state_change(&peer_connection); set_peer_connection_on_ice_gathering_state_change(&peer_connection); set_peer_connection_on_negotiation_needed(&peer_connection); set_websocket_on_open(&websocket, session_id); set_websocket_on_message(&websocket, peer_connection); } fn datachannel(&self) -> crate::Result<RtcDataChannel> { Ok(self .inner .borrow() .data_channel .as_ref() .ok_or_else(|| anyhow!("no data channel set on instance yet"))? .clone()) } /// Send message to the other end of the connection. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message<T: Serialize + ?Sized>(&self, message: &T) -> crate::Result<()> { let message = rmp_serde::to_vec(message)?; self.datachannel()? .send_with_u8_array(&message) .map_err(|err| anyhow!("failed to send string: {:?}", err)) } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/src/many_to_many/mod.rs
library/src/many_to_many/mod.rs
/*! Library module for implementation of the many-to-many topology of peer communication. Each peer in session is an equal, with ability to send and receive messages from any other peer. Unlike with one-to-many topology, any peer can leave at any time without compromising the network. To identify peers you should store [`UserId`] accessible inside `on_open_callback` in some custom structure. Then you can use it in [`NetworkManager::send_message`] to specify exactly which peer should receive the message. # Example In this example we create 3 peers that all establish connection with each other. Each of the peers will send a `ping` message to each new connection. Also each peer will respond with a `pong` response. Overall we will expect 6 `ping` and 6 `pong` messages (3 connections, both peers in each). ``` use wasm_peers::many_to_many::NetworkManager; use wasm_peers::{ConnectionType, SessionId}; use std::cell::RefCell; use std::rc::Rc; use web_sys::console; // there should be a signaling server from accompanying crate listening on this port const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-many"; const STUN_SERVER_URL: &str = "stun:openrelay.metered.ca:80"; let peer_generator = || { let mut peer = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1), ConnectionType::Stun { urls: STUN_SERVER_URL.to_string() }, ) .expect("failed to connect to signaling server"); let peer_on_open = { let peer = peer.clone(); move |user_id| { console::log_1(&format!("connection to peer established: {:?}", user_id).into()); if let Err(err) = peer.send_message(user_id, "ping!") { console::log_1(&format!("failed to send message: {:?}", err).into()); } } }; let peer_on_message = { let peer = peer.clone(); move |user_id, message: String| { console::log_1( &format!( "peer received message from other peer {:?}: {}", user_id, message ) .into(), ); if let Err(err) = peer.send_message(user_id, &"pong!".to_owned()) { console::log_1(&format!("failed to send message: {:?}", 
err).into()); } } }; peer.start(peer_on_open, peer_on_message); }; peer_generator(); peer_generator(); peer_generator(); ``` */ use serde::de::DeserializeOwned; use serde::Serialize; use wasm_peers_protocol::{SessionId, UserId}; use crate::one_to_many::NetworkManager as OneToManyNetworkManager; use crate::ConnectionType; /// Abstraction over `WebRTC` peer-to-peer connection. /// Structure representing equal peer in many-to-many topology. /// /// `WebRTC` data channel communication abstracted to a single class. /// All setup is handled internally, you must only provide callbacks /// for when the connection opens and for handling incoming messages. /// It also provides a method of sending data to the other end of the connection. /// /// Only works with [wasm-peers-signaling-server](https://docs.rs/wasm-peers-signaling-server/latest/wasm_peers_signaling_server/) instance, /// whose full address must be provided. /// /// Start-up flow is divided into two methods [`NetworkManager::new`] and [`NetworkManager::start`] /// to allow possibility of referring to network manger itself from the callbacks. /// /// This class is a pointer to the underlying resource and can be cloned freely. #[derive(Debug, Clone)] pub struct NetworkManager { inner: OneToManyNetworkManager, } impl NetworkManager { /// Creates an instance with all resources required to create a connections to other peers. /// Requires an address of an signaling server instance, /// session id by which it will identify connecting other peers and type of connection. /// /// # Errors /// This function errs if opening a `WebSocket` connection to URL provided by `signaling_server_url` fails. pub fn new( signaling_server_url: &str, session_id: SessionId, connection_type: ConnectionType, ) -> crate::Result<Self> { Ok(Self { inner: OneToManyNetworkManager::new( signaling_server_url, session_id, connection_type, true, )?, }) } /// Second part of the setup that begins the actual connection. 
/// Requires specifying a callbacks that are guaranteed to run /// when a new connection opens and on each message received. /// It takes [`UserId`] as an argument which helps identify sending peer. pub fn start<T: DeserializeOwned>( &mut self, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { self.inner.start(on_open_callback, on_message_callback); } pub fn start_with_retransmits<T: DeserializeOwned>( &mut self, max_retransmits: u16, on_open_callback: impl FnMut(UserId) + Clone + 'static, on_message_callback: impl FnMut(UserId, T) + Clone + 'static, ) { self.inner .start_with_retransmits(max_retransmits, on_open_callback, on_message_callback); } /// Sends message over established data channel to a single peer represented by /// the [`UserId`] returned by signaling server during connection establishment. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message<T: Serialize + ?Sized>( &self, user_id: UserId, message: &T, ) -> crate::Result<()> { self.inner.send_message(user_id, message) } /// Send message to a all connected client-users. /// /// # Errors /// It might fail if the connection is not yet set up /// and thus should only be called after `on_open_callback` triggers. /// Otherwise it will result in an error: /// - if sending of the message was tried before data channel was established or, /// - if sending of the message failed. pub fn send_message_to_all<T: Serialize + ?Sized>(&self, message: &T) -> crate::Result<()> { self.inner.send_message_to_all(message) } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/tests/many_to_many.rs
library/tests/many_to_many.rs
//! Test suite for the Web and headless browsers. #![cfg(target_arch = "wasm32")] use std::cell::RefCell; use std::rc::Rc; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use wasm_peers::many_to_many::NetworkManager; use wasm_peers::{ConnectionType, SessionId}; use web_sys::console; const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-many"; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn network_manager_starts_successfully() { let mut server = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1234), ConnectionType::Local, ) .unwrap(); server.start(|_| {}, |_, _: ()| {}); } #[wasm_bindgen_test] fn single_message_passes_between_all() { let opened_connections_count = Rc::new(RefCell::new(0)); let received_messages_count = Rc::new(RefCell::new(0)); let peer_generator = || { let mut server = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1234), ConnectionType::Local, ) .unwrap(); let server_clone = server.clone(); let opened_connections_count = opened_connections_count.clone(); let server_on_open = { move |user_id| { console::log_1(&format!("connection to user established: {:?}", user_id).into()); *opened_connections_count.borrow_mut() += 1; server_clone .send_message(user_id, &"ping!".to_owned()) .unwrap(); } }; let server_clone = server.clone(); let received_messages_count = received_messages_count.clone(); let server_on_message = { move |user_id, message: String| { console::log_1( &format!( "server received message from client {:?}: {}", user_id, message ) .into(), ); *received_messages_count.borrow_mut() += 1; server_clone .send_message(user_id, &"pong!".to_owned()) .unwrap(); } }; server.start(server_on_open, server_on_message); }; peer_generator(); peer_generator(); peer_generator(); peer_generator(); // assert!(*client_received_message.borrow()); // assert!(*server_received_message.borrow()); }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/tests/one_to_many.rs
library/tests/one_to_many.rs
//! Test suite for the Web and headless browsers. #![cfg(target_arch = "wasm32")] use std::cell::RefCell; use std::rc::Rc; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use wasm_peers::one_to_many::{MiniClient, MiniServer}; use wasm_peers::{ConnectionType, SessionId}; use web_sys::console; const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-many"; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn network_manager_starts_successfully() { let mut server = MiniServer::new( SIGNALING_SERVER_URL, SessionId::new(1234), ConnectionType::Local, ) .unwrap(); server.start(|_| {}, |_, _: ()| {}); } #[wasm_bindgen_test] fn single_message_passes_both_ways() { let server_received_message = Rc::new(RefCell::new(false)); let client_received_message = Rc::new(RefCell::new(false)); let mut server = MiniServer::new( SIGNALING_SERVER_URL, SessionId::new(1234), ConnectionType::Local, ) .unwrap(); let server_open_connections_count = Rc::new(RefCell::new(0)); let server_clone = server.clone(); let server_on_open = { move |user_id| { console::log_1(&format!("connection to user established: {:?}", user_id).into()); *server_open_connections_count.borrow_mut() += 1; if *server_open_connections_count.borrow() == 2 { server_clone .send_message_to_all(&"ping!".to_owned()) .unwrap(); } } }; let server_on_message = { move |user_id, message: String| { console::log_1( &format!( "server received message from client {:?}: {}", user_id, message ) .into(), ); *server_received_message.borrow_mut() = true; } }; server.start(server_on_open, server_on_message); let client_generator = || { let mut client = MiniClient::new( SIGNALING_SERVER_URL, SessionId::new(1234), ConnectionType::Local, ) .unwrap(); let client_on_open = || { /* do nothing */ }; let client_clone = client.clone(); let client_on_message = { let client_received_message = client_received_message.clone(); move |message: String| { console::log_1(&format!("client received message: {}", 
message).into()); client_clone .send_message_to_host(&"pong!".to_owned()) .unwrap(); *client_received_message.borrow_mut() = true; } }; client.start(client_on_open, client_on_message); }; client_generator(); client_generator(); // assert!(*client_received_message.borrow()); // assert!(*server_received_message.borrow()); }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/library/tests/one_to_one.rs
library/tests/one_to_one.rs
//! Test suite for the Web and headless browsers. #![cfg(target_arch = "wasm32")] use std::cell::RefCell; use std::rc::Rc; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use wasm_peers::one_to_one::NetworkManager; use wasm_peers::{ConnectionType, SessionId}; use web_sys::console; const SIGNALING_SERVER_URL: &str = "ws://0.0.0.0:9001/one-to-one"; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn network_manager_starts_successfully() { let mut server = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1234), &ConnectionType::Local, ) .unwrap(); server.start(|| {}, |_: ()| {}); } #[wasm_bindgen_test] fn single_message_passes_both_ways() { let server_received_message = Rc::new(RefCell::new(false)); let client_received_message = Rc::new(RefCell::new(false)); let mut server = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1234), &ConnectionType::Local, ) .unwrap(); let server_clone = server.clone(); let server_on_open = move || server_clone.send_message("ping!").unwrap(); let server_on_message = { let server_received_message = server_received_message; move |message: String| { console::log_1(&format!("server received message: {}", message).into()); *server_received_message.borrow_mut() = true; } }; server.start(server_on_open, server_on_message); let mut client = NetworkManager::new( SIGNALING_SERVER_URL, SessionId::new(1234), &ConnectionType::Local, ) .unwrap(); let client_on_open = || { /* do nothing */ }; let client_clone = client.clone(); let client_on_message = { let client_received_message = client_received_message; move |message: String| { console::log_1(&format!("client received message: {}", message).into()); client_clone.send_message("pong!").unwrap(); *client_received_message.borrow_mut() = true; } }; client.start(client_on_open, client_on_message); // assert!(*client_received_message.borrow()); // assert!(*server_received_message.borrow()); }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/xtask/src/main.rs
xtask/src/main.rs
#![allow(clippy::cargo_common_metadata)] use std::process; use clap::{Parser, Subcommand}; use color_eyre::Result; use xshell::{cmd, Shell}; #[derive(Parser)] #[command(author, version, about, long_about = None)] struct Cli { #[command(subcommand)] command: Command, } #[derive(Subcommand)] enum Command { Fmt, Check, Clippy, Run, Test, Doc, PreCommit, PublishDocker { tag: String }, PublishAws { tag: String }, } const AWS_PUBLIC_ECR_ACCOUNT_URI: &str = "public.ecr.aws/2j7p7g8d"; fn main() -> Result<()> { color_eyre::install()?; let cli = Cli::parse(); let sh = Shell::new()?; match &cli.command { Command::Fmt => fmt(&sh)?, Command::Check => check(&sh)?, Command::Clippy => clippy(&sh)?, Command::Run => run(&sh)?, Command::Test => test(&sh)?, Command::Doc => doc(&sh)?, Command::PreCommit => pre_commit(&sh)?, Command::PublishDocker { tag } => publish_docker(&sh, tag)?, Command::PublishAws { tag } => publish_aws(&sh, tag)?, }; Ok(()) } fn fmt(sh: &Shell) -> Result<()> { Ok(cmd!(sh, "cargo +nightly fmt").run()?) } fn check(sh: &Shell) -> Result<()> { Ok(cmd!(sh, "cargo check --all-targets --all-features --workspace").run()?) } fn clippy(sh: &Shell) -> Result<()> { Ok(cmd!(sh, "cargo clippy --all-targets --all-features --workspace").run()?) } fn run(sh: &Shell) -> Result<()> { Ok(cmd!(sh, "cargo run --package wasm-peers-signaling-server").run()?) } fn test(sh: &Shell) -> Result<()> { cmd!(sh, "cargo build --package wasm-peers-signaling-server").run()?; let mut server = process::Command::new("./target/debug/wasm-peers-signaling-server") .current_dir(project_root::get_project_root()?) 
.spawn()?; let result = || -> Result<()> { let current_dir = sh.current_dir(); sh.change_dir(project_root::get_project_root()?.join("library/")); cmd!(sh, "wasm-pack test --headless --firefox").run()?; cmd!(sh, "wasm-pack test --headless --chrome").run()?; sh.change_dir(current_dir); Ok(()) }(); server.kill()?; result } fn doc(sh: &Shell) -> Result<()> { Ok(cmd!(sh, "cargo doc --no-deps --all-features").run()?) } fn pre_commit(sh: &Shell) -> Result<()> { for cmd in [fmt, check, test, doc] { cmd(sh)?; } Ok(()) } fn publish_docker(sh: &Shell, tag: &str) -> Result<()> { cmd!(sh, "docker login").run()?; cmd!(sh, "docker build -t wasm-peers/signaling-server .").run()?; cmd!( sh, "docker tag wasm-peers/signaling-server tomkarw/wasm-peers-signaling-server:{tag}" ) .run()?; cmd!( sh, "docker tag wasm-peers/signaling-server tomkarw/wasm-peers-signaling-server:latest" ) .run()?; cmd!(sh, "docker push tomkarw/wasm-peers-signaling-server:{tag}").run()?; cmd!(sh, "docker push tomkarw/wasm-peers-signaling-server:latest").run()?; Ok(()) } fn publish_aws(sh: &Shell, tag: &str) -> Result<()> { #[rustfmt::skip] cmd!( sh, "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin {AWS_PUBLIC_ECR_ACCOUNT_URI}" ) .run()?; cmd!(sh, "docker build -t wasm-peers/signaling-server .").run()?; #[rustfmt::skip] cmd!( sh, "docker tag wasm-peers/signaling-server {AWS_PUBLIC_ECR_ACCOUNT_URI}/wasm-peers/signaling-server:{tag}" ) .run()?; #[rustfmt::skip] cmd!( sh, "docker tag wasm-peers/signaling-server {AWS_PUBLIC_ECR_ACCOUNT_URI}/wasm-peers/signaling-server:latest" ) .run()?; #[rustfmt::skip] cmd!( sh, "docker push {AWS_PUBLIC_ECR_ACCOUNT_URI}/wasm-peers/signaling-server:{tag}" ) .run()?; cmd!( sh, "docker push {AWS_PUBLIC_ECR_ACCOUNT_URI}/wasm-peers/signaling-server:latest" ) .run()?; Ok(()) }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/router.rs
signaling-server/src/router.rs
use axum::extract::{State, WebSocketUpgrade}; use axum::response::Response; use axum::routing::get; use axum::Router; use crate::{many_to_many, one_to_many, one_to_one}; #[derive(Default, Clone)] pub struct ServerState { one_to_one_connections: one_to_one::Connections, one_to_one_sessions: one_to_one::Sessions, one_to_many_connections: one_to_many::Connections, one_to_many_sessions: one_to_many::Sessions, many_to_many_connections: many_to_many::Connections, many_to_many_sessions: many_to_many::Sessions, } #[allow(clippy::unused_async)] async fn health_handler() -> &'static str { "OK" } #[allow(clippy::unused_async)] async fn one_to_one_handler(State(state): State<ServerState>, ws: WebSocketUpgrade) -> Response { ws.on_upgrade(move |socket| { one_to_one::user_connected( socket, state.one_to_one_connections, state.one_to_one_sessions, ) }) } #[allow(clippy::unused_async)] async fn one_to_many_handler(State(state): State<ServerState>, ws: WebSocketUpgrade) -> Response { ws.on_upgrade(move |socket| { one_to_many::user_connected( socket, state.one_to_many_connections, state.one_to_many_sessions, ) }) } #[allow(clippy::unused_async)] async fn many_to_many_handler(State(state): State<ServerState>, ws: WebSocketUpgrade) -> Response { ws.on_upgrade(move |socket| { many_to_many::user_connected( socket, state.many_to_many_connections, state.many_to_many_sessions, ) }) } pub fn create(server_state: ServerState) -> Router { Router::new() .route("/health", get(health_handler)) .route("/one-to-one", get(one_to_one_handler)) .route("/one-to-many", get(one_to_many_handler)) .route("/many-to-many", get(many_to_many_handler)) .with_state(server_state) }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/lib.rs
signaling-server/src/lib.rs
#![allow( clippy::module_name_repetitions, clippy::significant_drop_tightening // shows up only in CI, can't be reproduced locally, should be brought back )] // clippy WARN level lints #![warn( // missing_docs, clippy::cargo, clippy::pedantic, // clippy::nursery, clippy::dbg_macro, clippy::unwrap_used, clippy::integer_division, clippy::large_include_file, clippy::map_err_ignore, // clippy::missing_docs_in_private_items, clippy::panic, clippy::todo, clippy::undocumented_unsafe_blocks, clippy::unimplemented, clippy::unreachable )] // clippy WARN level lints, that can be upgraded to DENY if preferred #![warn( clippy::float_arithmetic, clippy::arithmetic_side_effects, clippy::modulo_arithmetic, clippy::as_conversions, clippy::assertions_on_result_states, clippy::clone_on_ref_ptr, clippy::create_dir, clippy::default_union_representation, clippy::deref_by_slicing, clippy::empty_drop, clippy::empty_structs_with_brackets, clippy::exit, clippy::filetype_is_file, clippy::float_cmp_const, clippy::if_then_some_else_none, clippy::indexing_slicing, clippy::let_underscore_must_use, clippy::lossy_float_literal, clippy::pattern_type_mismatch, clippy::string_slice, clippy::try_err )] // clippy DENY level lints, they always have a quick fix that should be preferred #![deny( clippy::wildcard_imports, clippy::multiple_inherent_impl, clippy::rc_buffer, clippy::rc_mutex, clippy::rest_pat_in_fully_bound_structs, clippy::same_name_method, clippy::self_named_module_files, clippy::separated_literal_suffix, clippy::shadow_unrelated, clippy::str_to_string, clippy::string_add, clippy::string_to_string, clippy::unnecessary_self_imports, clippy::unneeded_field_pattern, clippy::unseparated_literal_suffix, clippy::verbose_file_reads )] mod error; pub mod many_to_many; pub mod one_to_many; pub mod one_to_one; pub mod router; pub use error::{Error, Result};
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/many_to_many.rs
signaling-server/src/many_to_many.rs
use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use anyhow::anyhow; use axum::extract::ws::{Message, WebSocket}; use futures_util::{SinkExt, StreamExt, TryFutureExt}; use log::{error, info, warn}; use tokio::sync::{mpsc, RwLock}; use tokio_stream::wrappers::UnboundedReceiverStream; use wasm_peers_protocol::one_to_many::SignalMessage; use wasm_peers_protocol::{SessionId, UserId}; #[derive(Default, Debug)] pub struct Session { pub users: HashSet<UserId>, } pub type Connections = Arc<RwLock<HashMap<UserId, mpsc::UnboundedSender<Message>>>>; pub type Sessions = Arc<RwLock<HashMap<SessionId, Session>>>; static NEXT_USER_ID: AtomicU64 = AtomicU64::new(1); pub async fn user_connected(ws: WebSocket, connections: Connections, sessions: Sessions) { let user_id = UserId::new(NEXT_USER_ID.fetch_add(1, Ordering::Relaxed)); info!("new user connected: {:?}", user_id); let (mut user_ws_tx, mut user_ws_rx) = ws.split(); let (tx, rx) = mpsc::unbounded_channel(); let mut rx = UnboundedReceiverStream::new(rx); tokio::task::spawn(async move { while let Some(message) = rx.next().await { user_ws_tx .send(message) .unwrap_or_else(|e| error!("websocket send error: {}", e)) .await; } }); connections.write().await.insert(user_id, tx); while let Some(result) = user_ws_rx.next().await { let msg = match result { Ok(msg) => msg, Err(e) => { error!("websocket error (id={:?}): {}", user_id, e); break; } }; if let Err(err) = user_message(user_id, msg, &connections, &sessions).await { error!("error while handling user message: {}", err); } } error!("user disconnected: {:?}", user_id); user_disconnected(user_id, &connections, &sessions).await; } async fn user_message( sender_id: UserId, msg: Message, connections: &Connections, sessions: &Sessions, ) -> crate::Result<()> { let request = rmp_serde::from_slice::<SignalMessage>(msg.into_data().as_ref())?; info!("message received from user {:?}: {:?}", sender_id, request); match request { 
SignalMessage::SessionJoin(session_id, _) => { let mut sessions_writer = sessions.write().await; let session = sessions_writer .entry(session_id) .or_insert_with(Session::default); let connections_reader = connections.read().await; // start connections with all already present users for client_id in &session.users { { let host_response = SignalMessage::SessionReady(session_id, *client_id); let host_response = rmp_serde::to_vec(&host_response)?; connections_reader .get(&sender_id) .ok_or(anyhow!("host not in connections"))? .send(Message::Binary(host_response))?; } } session.users.insert(sender_id); } // pass offer to the other user in session without changing anything SignalMessage::SdpOffer(session_id, recipient_id, offer) => { let response = SignalMessage::SdpOffer(session_id, sender_id, offer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send offer to non existing user"))? .send(Message::Binary(response))?; } // pass answer to the other user in session without changing anything SignalMessage::SdpAnswer(session_id, recipient_id, answer) => { let response = SignalMessage::SdpAnswer(session_id, sender_id, answer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send answer to non existing user"))? .send(Message::Binary(response))?; } SignalMessage::IceCandidate(session_id, recipient_id, candidate) => { let response = SignalMessage::IceCandidate(session_id, sender_id, candidate); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send ice candidate to non existing user"))? 
.send(Message::Binary(response))?; } SignalMessage::SessionReady(session_id, recipient_id) => { let response = SignalMessage::SessionReady(session_id, sender_id); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send ready message to non existing user"))? .send(Message::Binary(response))?; } SignalMessage::Error(session_id, recipient_id, error) => { warn!( "error message received from user {:?}: {:?}", sender_id, error ); let response = SignalMessage::Error(session_id, sender_id, error); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send ready message to non existing user"))? .send(Message::Binary(response))?; } } Ok(()) } async fn user_disconnected(user_id: UserId, connections: &Connections, sessions: &Sessions) { connections.write().await.remove(&user_id); let mut session_to_delete = None; let mut sessions = sessions.write().await; for (session_id, session) in sessions.iter_mut() { if session.users.contains(&user_id) { session.users.remove(&user_id); } if session.users.is_empty() { session_to_delete = Some(*session_id); break; } } // remove session if it's empty if let Some(session_id) = session_to_delete { sessions.remove(&session_id); } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/one_to_many.rs
signaling-server/src/one_to_many.rs
use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use anyhow::anyhow; use axum::extract::ws::{Message, WebSocket}; use futures_util::{SinkExt, StreamExt, TryFutureExt}; use log::{error, info}; use tokio::sync::{mpsc, RwLock}; use tokio_stream::wrappers::UnboundedReceiverStream; use wasm_peers_protocol::one_to_many::SignalMessage; use wasm_peers_protocol::{SessionId, UserId}; #[derive(Default, Debug)] pub struct Session { pub host: Option<UserId>, pub users: HashSet<UserId>, } pub type Connections = Arc<RwLock<HashMap<UserId, mpsc::UnboundedSender<Message>>>>; pub type Sessions = Arc<RwLock<HashMap<SessionId, Session>>>; static NEXT_USER_ID: AtomicU64 = AtomicU64::new(1); pub async fn user_connected(ws: WebSocket, connections: Connections, sessions: Sessions) { let user_id = UserId::new(NEXT_USER_ID.fetch_add(1, Ordering::Relaxed)); info!("new user connected: {:?}", user_id); let (mut user_ws_tx, mut user_ws_rx) = ws.split(); let (tx, rx) = mpsc::unbounded_channel(); let mut rx = UnboundedReceiverStream::new(rx); tokio::task::spawn(async move { while let Some(message) = rx.next().await { user_ws_tx .send(message) .unwrap_or_else(|e| error!("websocket send error: {}", e)) .await; } }); connections.write().await.insert(user_id, tx); while let Some(result) = user_ws_rx.next().await { let msg = match result { Ok(msg) => msg, Err(e) => { error!("websocket error (id={:?}): {}", user_id, e); break; } }; if let Err(err) = user_message(user_id, msg, &connections, &sessions).await { error!("error while handling user message: {}", err); } } error!("user disconnected: {:?}", user_id); user_disconnected(user_id, &connections, &sessions).await; } async fn user_message( sender_id: UserId, msg: Message, connections: &Connections, sessions: &Sessions, ) -> crate::Result<()> { let request = rmp_serde::from_slice::<SignalMessage>(msg.into_data().as_ref())?; info!("message received from user {:?}: {:?}", sender_id, request); 
match request { SignalMessage::SessionJoin(session_id, is_host) => { let mut sessions_writer = sessions.write().await; let session = sessions_writer .entry(session_id) .or_insert_with(Session::default); let connections_reader = connections.read().await; if is_host && session.host.is_none() { session.host = Some(sender_id); // start connections with all already present users for client_id in &session.users { { let host_response = SignalMessage::SessionReady(session_id, *client_id); let host_response = rmp_serde::to_vec(&host_response)?; connections_reader .get(&sender_id) .ok_or(anyhow!("host not in connections"))? .send(Message::Binary(host_response))?; } } } else if is_host && session.host.is_some() { error!("connecting user wants to be a host, but host is already present!"); // TODO: proceed with connecting user as a normal user } else { // connect new user with host session.users.insert(sender_id); // TODO: wait for host instead of ignoring connecting users if let Some(host_id) = session.host { let host_response = SignalMessage::SessionReady(session_id, sender_id); let host_response = rmp_serde::to_vec(&host_response)?; connections_reader .get(&host_id) .ok_or(anyhow!("host not in connections"))? .send(Message::Binary(host_response))?; } } } // pass offer to the other user in session without changing anything SignalMessage::SdpOffer(session_id, recipient_id, offer) => { let response = SignalMessage::SdpOffer(session_id, sender_id, offer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send offer to non existing user"))? 
.send(Message::Binary(response))?; } // pass answer to the other user in session without changing anything SignalMessage::SdpAnswer(session_id, recipient_id, answer) => { let response = SignalMessage::SdpAnswer(session_id, sender_id, answer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or(anyhow!("tried to send answer to non existing user"))? .send(Message::Binary(response))?; } SignalMessage::IceCandidate(session_id, recipient_id, candidate) => { let response = SignalMessage::IceCandidate(session_id, sender_id, candidate); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or_else(|| anyhow!("no sender for given id"))? .send(Message::Binary(response))?; } _ => {} } Ok(()) } async fn user_disconnected(user_id: UserId, connections: &Connections, sessions: &Sessions) { connections.write().await.remove(&user_id); let mut session_to_delete = None; for (session_id, session) in sessions.write().await.iter_mut() { if session.host == Some(user_id) { session.host = None; } else if session.users.contains(&user_id) { session.users.remove(&user_id); } if session.host.is_none() && session.users.is_empty() { session_to_delete = Some(*session_id); break; } } // remove session if it's empty if let Some(session_id) = session_to_delete { sessions.write().await.remove(&session_id); } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/one_to_one.rs
signaling-server/src/one_to_one.rs
use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use anyhow::anyhow; use axum::extract::ws::{Message, WebSocket}; use futures_util::{SinkExt, StreamExt}; use log::{error, info}; use tokio::sync::{mpsc, RwLock}; use tokio_stream::wrappers::UnboundedReceiverStream; use wasm_peers_protocol::one_to_one::SignalMessage; use wasm_peers_protocol::{SessionId, UserId}; pub struct Session { pub first: Option<UserId>, pub second: Option<UserId>, pub offer_received: bool, } pub type Connections = Arc<RwLock<HashMap<UserId, mpsc::UnboundedSender<Message>>>>; pub type Sessions = Arc<RwLock<HashMap<SessionId, Session>>>; static NEXT_USER_ID: AtomicU64 = AtomicU64::new(1); pub async fn user_connected(ws: WebSocket, connections: Connections, sessions: Sessions) { let user_id = UserId::new(NEXT_USER_ID.fetch_add(1, Ordering::Relaxed)); info!("new user connected: {:?}", user_id); let (mut user_ws_tx, mut user_ws_rx) = ws.split(); let (tx, rx) = mpsc::unbounded_channel(); let mut rx = UnboundedReceiverStream::new(rx); tokio::task::spawn(async move { while let Some(message) = rx.next().await { user_ws_tx .send(message) .await .unwrap_or_else(|e| error!("websocket send error: {}", e)); } }); connections.write().await.insert(user_id, tx); while let Some(result) = user_ws_rx.next().await { let msg = match result { Ok(msg) => msg, Err(err) => { error!("websocket error (user_id={:?}): {}", user_id, err); break; } }; if let Err(err) = user_message(user_id, msg, &connections, &sessions).await { error!("user_message error: {}", err); } } error!("user disconnected: {:?}", user_id); user_disconnected(user_id, &connections, &sessions).await; } async fn user_message( user_id: UserId, msg: Message, connections: &Connections, sessions: &Sessions, ) -> crate::Result<()> { let request = rmp_serde::from_slice::<SignalMessage>(msg.into_data().as_ref())?; info!("message received from user {:?}: {:?}", user_id, request); 
match request { SignalMessage::SessionJoin(session_id) => { session_join(sessions, connections, user_id, session_id).await?; } // pass offer to the other user in session without changing anything SignalMessage::SdpOffer(session_id, offer) => { sdp_offer(sessions, connections, user_id, session_id, offer).await?; } // pass answer to the other user in session without changing anything SignalMessage::SdpAnswer(session_id, answer) => { let sessions = sessions.read().await; let session = sessions .get(&session_id) .ok_or_else(|| anyhow!("no such session: {:?}", &session_id))?; let recipient_id = if Some(user_id) == session.first { session.second } else { session.first } .ok_or_else(|| anyhow!("missing second user in session: {:?}", &session_id))?; let response = SignalMessage::SdpAnswer(session_id, answer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or_else(|| anyhow!("no sender for given recipient_id"))? .send(Message::Binary(response))?; } SignalMessage::IceCandidate(session_id, candidate) => { let sessions = sessions.read().await; let session = sessions .get(&session_id) .ok_or_else(|| anyhow!("no such session: {:?}", &session_id))?; let recipient_id = if Some(user_id) == session.first { session.second } else { session.first } .ok_or_else(|| anyhow!("missing second user in session: {:?}", &session_id))?; let response = SignalMessage::IceCandidate(session_id, candidate); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or_else(|| anyhow!("no sender for given recipient_id"))? 
.send(Message::Binary(response))?; } other => { error!("received unexpected signal message: {:?}", other); } } Ok(()) } async fn session_join( sessions: &Sessions, connections: &Connections, user_id: UserId, session_id: SessionId, ) -> crate::Result<()> { let mut sessions = sessions.write().await; match sessions.entry(session_id) { // on first user in session - create session object and store connecting user id Entry::Vacant(entry) => { entry.insert(Session { first: Some(user_id), second: None, offer_received: false, }); } // on second user - add him to existing session and notify users that session is ready Entry::Occupied(mut entry) => { entry.get_mut().second = Some(user_id); let first_response = SignalMessage::SessionReady(session_id, true); let first_response = rmp_serde::to_vec(&first_response)?; let second_response = SignalMessage::SessionReady(session_id, false); let second_response = rmp_serde::to_vec(&second_response)?; let connections_reader = connections.read().await; if let Some(first_id) = entry.get().first { connections_reader .get(&first_id) .ok_or_else(|| anyhow!("no sender for given id"))? .send(Message::Binary(first_response))?; connections_reader .get(&user_id) .ok_or_else(|| anyhow!("no sender for given id"))? 
.send(Message::Binary(second_response))?; } } } Ok(()) } async fn sdp_offer( sessions: &Sessions, connections: &Connections, user_id: UserId, session_id: SessionId, offer: String, ) -> crate::Result<()> { let mut sessions = sessions.write().await; let session = sessions .get_mut(&session_id) .ok_or_else(|| anyhow!("no such session: {:?}", &session_id))?; if session.offer_received { info!( "offer already sent by the the peer, ignoring the second offer: {:?}", session_id ); } else { session.offer_received = true; } let recipient_id = if Some(user_id) == session.first { session.second } else { session.first } .ok_or_else(|| anyhow!("missing second user in session: {:?}", &session_id))?; let response = SignalMessage::SdpOffer(session_id, offer); let response = rmp_serde::to_vec(&response)?; let connections_reader = connections.read().await; connections_reader .get(&recipient_id) .ok_or_else(|| anyhow!("no sender for given recipient_id"))? .send(Message::Binary(response))?; Ok(()) } async fn user_disconnected(user_id: UserId, connections: &Connections, sessions: &Sessions) { connections.write().await.remove(&user_id); let mut sessions = sessions.write().await; let mut session_to_delete = None; for (session_id, session) in sessions.iter_mut() { if session.first == Some(user_id) { session.first = None; if session.first.is_none() && session.second.is_none() { session_to_delete = Some(*session_id); } break; } else if session.second == Some(user_id) { session.second = None; if session.first.is_none() && session.second.is_none() { session_to_delete = Some(*session_id); } break; } } // remove session if it's empty if let Some(session_id) = session_to_delete { sessions.remove(&session_id); } }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/env.rs
signaling-server/src/env.rs
use once_cell::sync::Lazy; use serde::Deserialize; #[derive(Deserialize, Debug)] pub struct Env { pub example_bool: bool, pub example_list: Vec<String>, } /// Access to parsed environment variables. pub static ENV: Lazy<Env> = Lazy::new(|| envy::from_env().expect("some env vars missing"));
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/error.rs
signaling-server/src/error.rs
pub type Result<T> = anyhow::Result<T>; pub type Error = anyhow::Error;
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/signaling-server/src/main.rs
signaling-server/src/main.rs
use std::env; use std::net::SocketAddr; use std::str::FromStr; use log::{info, LevelFilter}; use simplelog::{ColorChoice, Config, TermLogger, TerminalMode}; use wasm_peers_signaling_server::router::{self, ServerState}; #[tokio::main] async fn main() -> anyhow::Result<()> { TermLogger::init( LevelFilter::Debug, Config::default(), TerminalMode::Mixed, ColorChoice::Auto, )?; let server_state = ServerState::default(); let app = router::create(server_state); let address = env::args() .nth(1) .unwrap_or_else(|| "0.0.0.0:9001".to_string()); let address = SocketAddr::from_str(&address)?; info!("Listening on: http://{}", address); axum::Server::bind(&address) .serve(app.into_make_service()) .await?; Ok(()) }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/protocol/src/lib.rs
protocol/src/lib.rs
/*! Helper crate that declares common types and structures shared between [wasm-peers](https://docs.rs/wasm-peers/latest/wasm_peers/) and [wasm-peers-signaling-server](https://docs.rs/wasm-peers-signaling-server/latest/wasm_peers_signaling_server/). */ #![allow(clippy::module_name_repetitions)] // clippy WARN level lints #![warn( // missing_docs, clippy::cargo, clippy::pedantic, clippy::nursery, clippy::dbg_macro, clippy::unwrap_used, clippy::integer_division, clippy::large_include_file, clippy::map_err_ignore, // clippy::missing_docs_in_private_items, clippy::panic, clippy::todo, clippy::undocumented_unsafe_blocks, clippy::unimplemented, clippy::unreachable )] // clippy WARN level lints, that can be upgraded to DENY if preferred #![warn( clippy::float_arithmetic, clippy::arithmetic_side_effects, clippy::modulo_arithmetic, clippy::as_conversions, clippy::assertions_on_result_states, clippy::clone_on_ref_ptr, clippy::create_dir, clippy::default_union_representation, clippy::deref_by_slicing, clippy::empty_drop, clippy::empty_structs_with_brackets, clippy::exit, clippy::filetype_is_file, clippy::float_cmp_const, clippy::if_then_some_else_none, clippy::indexing_slicing, clippy::let_underscore_must_use, clippy::lossy_float_literal, clippy::pattern_type_mismatch, clippy::string_slice, clippy::try_err )] // clippy DENY level lints, they always have a quick fix that should be preferred #![deny( clippy::wildcard_imports, clippy::multiple_inherent_impl, clippy::rc_buffer, clippy::rc_mutex, clippy::rest_pat_in_fully_bound_structs, clippy::same_name_method, clippy::self_named_module_files, clippy::separated_literal_suffix, clippy::shadow_unrelated, clippy::str_to_string, clippy::string_add, clippy::string_to_string, clippy::unnecessary_self_imports, clippy::unneeded_field_pattern, clippy::unseparated_literal_suffix, clippy::verbose_file_reads )] mod common; pub mod many_to_many; pub mod one_to_many; pub mod one_to_one; pub use common::{IceCandidate, IsHost, SessionId, UserId};
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/protocol/src/many_to_many.rs
protocol/src/many_to_many.rs
/*! Signaling messages exchanged between used by `NetworkManagers` and signaling server to facilitate communication in many-to-many topology. */ use serde::{Deserialize, Serialize}; use crate::common::IceCandidate; use crate::{SessionId, UserId}; /// `Enum` consisting of two main categories are messages used to setup signaling session /// and messages used to setup `WebRTC` connection afterwards. /// Most of the include [`SessionId`] and [`UserId`] to uniquely identify each peer. #[derive(Debug, Serialize, Deserialize)] pub enum SignalMessage { /// Either client or server connecting to signaling session SessionJoin(SessionId), /// Report back to the users that both of them are in session SessionReady(SessionId, UserId), /// `SDP` Offer that gets passed to the other user without modifications SdpOffer(SessionId, UserId, String), /// `SDP` Answer that gets passed to the other user without modifications SdpAnswer(SessionId, UserId, String), /// Proposed ICE Candidate of one user passed to the other user without modifications IceCandidate(SessionId, UserId, IceCandidate), /// Generic error containing detailed information about the cause Error(SessionId, String), }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/protocol/src/one_to_many.rs
protocol/src/one_to_many.rs
/*! Signaling messages exchanged between used by `MiniServer`, `MiniClient` and signaling server to facilitate communication in client-server topology. */ use serde::{Deserialize, Serialize}; use crate::common::IceCandidate; use crate::{IsHost, SessionId, UserId}; /// `Enum` consisting of two main categories are messages used to setup signaling session /// and messages used to setup `WebRTC` connection afterwards. /// Most of the include [`SessionId`] and [`UserId`] to uniquely identify each peer. #[derive(Debug, Serialize, Deserialize)] pub enum SignalMessage { /// Either client or server connecting to signaling session SessionJoin(SessionId, IsHost), /// Report back to the users that both of them are in session SessionReady(SessionId, UserId), /// `SDP` Offer that gets passed to the other user without modifications SdpOffer(SessionId, UserId, String), /// `SDP` Answer that gets passed to the other user without modifications SdpAnswer(SessionId, UserId, String), /// Proposed ICE Candidate of one user passed to the other user without modifications IceCandidate(SessionId, UserId, IceCandidate), /// Generic error containing detailed information about the cause Error(SessionId, UserId, String), }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/protocol/src/one_to_one.rs
protocol/src/one_to_one.rs
/*! Signaling messages exchanged between used by `MiniServer`, `MiniClient` and signaling server to facilitate communication in client-server topology. */ use serde::{Deserialize, Serialize}; use crate::common::IceCandidate; use crate::{IsHost, SessionId}; /// `Enum` consisting of two main categories are messages used to setup signaling session /// and messages used to setup `WebRTC` connection afterwards. /// All of the messages include [`SessionId`] which is enough to identify the other peer in the connection. #[derive(Debug, Serialize, Deserialize)] pub enum SignalMessage { /// Either client or server connecting to signaling session SessionJoin(SessionId), /// Report back to the users that both of them are in session SessionReady(SessionId, IsHost), /// `SDP` Offer that gets passed to the other user without modifications SdpOffer(SessionId, String), /// `SDP` Answer that gets passed to the other user without modifications SdpAnswer(SessionId, String), /// Proposed ICE Candidate of one user passed to the other user without modifications IceCandidate(SessionId, IceCandidate), /// Generic error containing detailed information about the cause Error(SessionId, String), }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
wasm-peers/wasm-peers
https://github.com/wasm-peers/wasm-peers/blob/f37210d5d67a3d9732794a80a20347849ed8ab1e/protocol/src/common.rs
protocol/src/common.rs
use std::fmt::{Display, Formatter}; use std::str::FromStr; use serde::{Deserialize, Serialize}; /// Unique identifier of signaling session that each user provides /// when communicating with the signaling server. #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] pub struct SessionId(u128); impl SessionId { /// Wrap String into a `SessionId` `struct` #[must_use] pub const fn new(inner: u128) -> Self { Self(inner) } /// Acquire the underlying type #[must_use] #[allow(clippy::missing_const_for_fn)] pub fn inner(&self) -> u128 { self.0 } } impl FromStr for SessionId { type Err = <u128 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Self(s.parse()?)) } } impl Display for SessionId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "SessionId({})", self.0) } } /// Unique identifier of each peer connected to signaling server /// useful when communicating in one-to-many and many-to-many . #[derive(Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] pub struct UserId(u64); impl UserId { /// Wrap `u64` into a `UserId` `struct` #[must_use] pub const fn new(inner: u64) -> Self { Self(inner) } /// Acquire the underlying type #[must_use] #[allow(clippy::missing_const_for_fn)] pub fn into_inner(self) -> u64 { self.0 } } impl From<u64> for UserId { fn from(val: u64) -> Self { Self(val) } } impl Display for UserId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } /// Unique identifier specifying which peer is host and will be creating an offer, /// and which will await it. pub type IsHost = bool; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct IceCandidate { pub candidate: String, pub sdp_mid: Option<String>, pub sdp_m_line_index: Option<u16>, }
rust
Apache-2.0
f37210d5d67a3d9732794a80a20347849ed8ab1e
2026-01-04T20:24:22.678899Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/src/lib.rs
src/lib.rs
#![doc = include_str!("../README.md")] pub use venndb_macros::VennDB; /// A trait that types can implement in order to support `#[venndb(any)]` attribute filters. pub trait Any { /// Returns true if the value is considered to be "any" within the context of the type. /// /// # Example /// /// ``` /// use venndb::Any; /// /// #[derive(Debug)] /// struct MyString(String); /// /// impl Any for MyString { /// fn is_any(&self) -> bool { /// self.0 == "*" /// } /// } /// /// let my_string = MyString("*".to_string()); /// assert!(my_string.is_any()); /// /// let my_string = MyString("hello".to_string()); /// assert!(!my_string.is_any()); /// ``` fn is_any(&self) -> bool; } impl<T: Any> Any for &T { fn is_any(&self) -> bool { T::is_any(*self) } } impl<T: Any> Any for Option<T> { fn is_any(&self) -> bool { match self { Some(value) => value.is_any(), None => false, } } } impl<T: Any> Any for std::sync::Arc<T> { fn is_any(&self) -> bool { T::is_any(&**self) } } impl<T: Any> Any for std::rc::Rc<T> { fn is_any(&self) -> bool { T::is_any(&**self) } } impl<T: Any> Any for Box<T> { fn is_any(&self) -> bool { T::is_any(&**self) } } mod bitvec; #[doc(hidden)] pub mod __internal { //! Hidden thirdparty dependencies for venndb, //! not to be relied upon directly, as they may change at any time. pub use crate::bitvec::{BitVec, IterOnes}; pub use hashbrown::HashMap; use rand::Rng; #[must_use] /// Generate a random `usize`. pub fn rand_range(limit: usize) -> usize { rand::rng().random_range(0..limit) } pub mod hash_map { //! Internal types related to hash map. pub use hashbrown::hash_map::Entry; } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/src/bitvec.rs
src/bitvec.rs
#![allow(dead_code)] //! Fork from <https://docs.rs/vob/3.0.6/src/vob/lib.rs.html#138-145> //! Original License: <https://github.com/softdevteam/vob/blob/master/LICENSE-MIT> #[must_use] #[derive(Debug, Clone, Default)] pub struct BitVec { len: usize, data: Vec<usize>, } #[derive(Debug)] pub struct IterOnes<'a> { index: usize, bv: &'a BitVec, } impl BitVec { #[inline(always)] pub fn new() -> Self { Self::default() } pub fn with_capacity(capacity: usize) -> Self { Self { len: 0, data: Vec::with_capacity(blocks_required(capacity)), } } pub fn repeat(value: bool, len: usize) -> Self { let mut v = Self { len, data: vec![if value { !0 } else { 0 }; blocks_required(len)], }; v.mask_last_block(); v } #[must_use] pub fn iter_ones(&self) -> IterOnes<'_> { IterOnes { index: 0, bv: self } } #[must_use] pub fn count_ones(&self) -> usize { self.iter_ones().count() } #[must_use] pub fn any(&self) -> bool { self.iter_ones().next().is_some() } pub fn push(&mut self, value: bool) { debug_assert_eq!(self.data.len(), blocks_required(self.len)); if self.len.is_multiple_of(BITS_PER_BLOCK) { self.data.push(0); } let i = self.len; self.len = i.checked_add(1).expect("Overflow detected"); self.set(i, value); } fn set(&mut self, index: usize, value: bool) -> bool { if index >= self.len { panic!( "Index out of bounds: the len is {} but the index is {}", self.len, index ); } let msk = 1 << (index % BITS_PER_BLOCK); let off = block_offset(index); let old_v = self.data[off]; let new_v = if value { old_v | msk } else { old_v & !msk }; if new_v != old_v { self.data[off] = new_v; true } else { false } } pub fn or(&mut self, other: &Self) -> bool { let mut chngd = false; for (self_blk, other_blk) in self .data .iter_mut() .zip(other.data.iter().chain(std::iter::repeat(&0))) { let old_v = *self_blk; let new_v = old_v | *other_blk; *self_blk = new_v; chngd |= old_v != new_v; } // We don't need to mask the last block per our assumptions chngd } pub fn and(&mut self, other: &Self) -> bool { let mut 
chngd = false; for (self_blk, other_blk) in self .data .iter_mut() .zip(other.data.iter().chain(std::iter::repeat(&0))) { let old_v = *self_blk; let new_v = old_v & *other_blk; *self_blk = new_v; chngd |= old_v != new_v; } // We don't need to mask the last block as those bits can't be set by "&" by definition. chngd } /// We guarantee that the last storage block has no bits set past the "last" bit: this function /// clears any such bits. fn mask_last_block(&mut self) { debug_assert_eq!(self.data.len(), blocks_required(self.len)); let ub = self.len % BITS_PER_BLOCK; // If there are no unused bits, there's no need to perform masking. if ub > 0 { let msk = (1 << ub) - 1; let off = block_offset(self.len); let old_v = self.data[off]; let new_v = old_v & msk; if new_v != old_v { self.data[off] = new_v; } } } } impl Iterator for IterOnes<'_> { type Item = usize; fn next(&mut self) -> Option<Self::Item> { if self.index >= self.bv.len { return None; } // start at current index, mask off earlier bits in the starting block let mut b = self.index / BITS_PER_BLOCK; let off = self.index % BITS_PER_BLOCK; // guard against empty storage if b >= self.bv.data.len() { self.index = self.bv.len; return None; } let mut v = self.bv.data[b]; if off != 0 { v &= usize::MAX << off; } loop { if v != 0 { let tz = v.trailing_zeros() as usize; let bit = b * BITS_PER_BLOCK + tz; if bit < self.bv.len { self.index = bit + 1; return Some(bit); } else { self.index = self.bv.len; return None; } } b += 1; if b >= self.bv.data.len() { self.index = self.bv.len; return None; } v = self.bv.data[b]; } } fn size_hint(&self) -> (usize, Option<usize>) { // cannot know remaining ones cheaply, use a safe upper bound let remaining = self.bv.len.saturating_sub(self.index); (0, Some(remaining)) } } impl std::ops::BitOrAssign<&Self> for BitVec { #[inline(always)] fn bitor_assign(&mut self, other: &Self) { let _ = self.or(other); } } impl std::ops::BitOr<&BitVec> for &BitVec { type Output = BitVec; #[inline(always)] 
fn bitor(self, other: &BitVec) -> BitVec { let mut rv = self.clone(); let _ = rv.or(other); rv } } impl std::ops::BitOr<&Self> for BitVec { type Output = Self; #[inline(always)] fn bitor(mut self, other: &Self) -> Self { let _ = self.or(other); self } } impl std::ops::BitAndAssign<&Self> for BitVec { #[inline(always)] fn bitand_assign(&mut self, other: &Self) { let _ = self.and(other); } } impl std::ops::BitAnd<&BitVec> for &BitVec { type Output = BitVec; #[inline(always)] fn bitand(self, other: &BitVec) -> BitVec { let mut rv = self.clone(); let _ = rv.and(other); rv } } impl std::ops::BitAnd<&Self> for BitVec { type Output = Self; #[inline(always)] fn bitand(mut self, other: &Self) -> Self { let _ = self.and(other); self } } const BYTES_PER_BLOCK: usize = size_of::<usize>(); const BITS_PER_BLOCK: usize = BYTES_PER_BLOCK * 8; #[inline(always)] /// Takes as input a number of bits requiring storage; returns an aligned number of blocks needed /// to store those bits. const fn blocks_required(num_bits: usize) -> usize { let n = num_bits / BITS_PER_BLOCK; if !num_bits.is_multiple_of(BITS_PER_BLOCK) { n + 1 } else { n } } #[inline(always)] /// Return the offset in the vector of the storage block storing the bit `off`. const fn block_offset(off: usize) -> usize { off / BITS_PER_BLOCK } #[cfg(test)] mod tests { use super::*; #[test] fn push_adjusts_vec_correctly_one() { let mut v = BitVec::new(); assert_eq!(v.data.len(), 0); v.push(false); assert_eq!(v.data.len(), 1); } fn random_bitvec(len: usize) -> BitVec { let mut vob = BitVec::with_capacity(len); for _ in 0..len { vob.push(rand::random()); } // these tests can later be dialed down, as they noticeable slow down every random vob test. 
assert_eq!( vob.iter_ones().count(), vob.iter_ones().filter(|_| true).count() ); vob } #[test] fn test_count() { for test_len in 1..128 { let _ = random_bitvec(test_len); } } #[test] fn test_iter_ones() { #[allow(clippy::needless_pass_by_value)] fn t(v: &BitVec, expected: Vec<usize>) { assert_eq!(v.iter_ones().collect::<Vec<usize>>(), expected); } t(&BitVec::repeat(true, 131), (0..131).collect::<Vec<_>>()); let mut v1 = BitVec::new(); v1.push(false); v1.push(true); v1.push(false); v1.push(true); t(&v1, vec![1, 3]); } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/fuzz/fuzz_targets/fuzz_employee_db.rs
fuzz/fuzz_targets/fuzz_employee_db.rs
#![no_main] use libfuzzer_sys::arbitrary::{self, Arbitrary}; use libfuzzer_sys::fuzz_target; use venndb::{Any, VennDB}; #[derive(Clone, Debug, Arbitrary, VennDB)] pub struct Employee { #[venndb(key)] id: u16, _name: String, earth: bool, alive: Option<bool>, #[venndb(filter)] faction: Faction, #[venndb(filter, any)] planet: Option<Planet>, } #[derive(Clone, Debug, Arbitrary, PartialEq, Eq, Hash)] pub enum Faction { Rebel, Empire, } #[derive(Clone, Debug, Arbitrary, PartialEq, Eq, Hash)] pub enum Planet { Any, Earth, Mars, } impl Any for Planet { fn is_any(&self) -> bool { self == &Planet::Any } } fuzz_target!(|rows: Vec<Employee>| { let _ = EmployeeDB::from_rows(rows); });
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/benches/proxydb.rs
benches/proxydb.rs
mod proxies; use divan::AllocProfiler; use proxies::{InMemProxyDB, NaiveProxyDB, ProxyDB, SqlLiteProxyDB}; use std::sync::atomic::AtomicUsize; #[global_allocator] static ALLOC: AllocProfiler = AllocProfiler::system(); fn main() { // Run registered benchmarks. divan::main(); } const POOLS: [&str; 14] = [ "poolA", "poolB", "poolC", "poolD", "poolE", "poolF", "poolG", "poolH", "poolI", "poolJ", "poolA", "poolB", "poolC", "poolD", ]; const COUNTRIES: [&str; 13] = [ "US", "CA", "GB", "DE", "FR", "IT", "ES", "AU", "JP", "CN", "FR", "IT", "ES", ]; #[allow(clippy::declare_interior_mutable_const)] const COUNTER: AtomicUsize = AtomicUsize::new(0); fn next_round() -> usize { #[allow(clippy::borrow_interior_mutable_const)] COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed) } fn test_db(db: &impl ProxyDB) { let i = next_round(); let pool = POOLS[i % POOLS.len()]; let country = COUNTRIES[i % COUNTRIES.len()]; let result = db.get(i as u64); divan::black_box(result); let result = db.any_tcp(pool, country); divan::black_box(result); let result = db.any_socks5_isp(pool, country); divan::black_box(result); } #[divan::bench] fn venn_proxy_db_100(bencher: divan::Bencher) { bencher .with_inputs(|| InMemProxyDB::create(100)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn naive_proxy_db_100(bencher: divan::Bencher) { bencher .with_inputs(|| NaiveProxyDB::create(100)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn sql_lite_proxy_db_100(bencher: divan::Bencher) { bencher .with_inputs(|| SqlLiteProxyDB::create(100)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn venn_proxy_db_12_500(bencher: divan::Bencher) { bencher .with_inputs(|| InMemProxyDB::create(12_500)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn naive_proxy_db_12_500(bencher: divan::Bencher) { bencher .with_inputs(|| NaiveProxyDB::create(12_500)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn sql_lite_proxy_db_12_500(bencher: divan::Bencher) { bencher .with_inputs(|| 
SqlLiteProxyDB::create(12_500)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn venn_proxy_db_100_000(bencher: divan::Bencher) { bencher .with_inputs(|| InMemProxyDB::create(100_000)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn naive_proxy_db_100_000(bencher: divan::Bencher) { bencher .with_inputs(|| NaiveProxyDB::create(100_000)) .bench_refs(|db| test_db(db)); } #[divan::bench] fn sql_lite_proxy_db_100_000(bencher: divan::Bencher) { bencher .with_inputs(|| SqlLiteProxyDB::create(100_000)) .bench_refs(|db| test_db(db)); }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/benches/proxies/mod.rs
benches/proxies/mod.rs
use sqlite::Row; use std::{borrow::Cow, ops::Deref}; use venndb::{Any, VennDB}; pub(super) trait ProxyDB: Sized { fn create(n: usize) -> Self; fn get(&self, id: u64) -> Option<Cow<'_, Proxy>>; fn any_tcp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>>; fn any_socks5_isp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>>; } #[derive(Debug, Clone, VennDB)] #[venndb(name = "InMemProxyDB")] pub(super) struct Proxy { #[venndb(key)] pub(super) id: u64, pub(super) address: String, pub(super) username: String, pub(super) password: String, pub(super) tcp: bool, pub(super) udp: bool, pub(super) http: bool, pub(super) socks5: bool, pub(super) datacenter: bool, pub(super) residential: bool, pub(super) mobile: bool, #[venndb(filter)] pub(super) pool: Option<NormalizedString>, #[venndb(filter, any)] pub(super) country: NormalizedString, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub(super) struct NormalizedString(String); impl<S: AsRef<str>> From<S> for NormalizedString { fn from(s: S) -> Self { Self(s.as_ref().trim().to_lowercase()) } } impl Any for NormalizedString { fn is_any(&self) -> bool { self.0 == "*" } } impl Deref for NormalizedString { type Target = str; fn deref(&self) -> &Self::Target { &self.0 } } impl From<String> for Proxy { fn from(s: String) -> Self { let mut parts = s.split(','); Self { id: parts.next().unwrap().parse().unwrap(), address: parts.next().unwrap().to_owned(), username: parts.next().unwrap().to_owned(), password: parts.next().unwrap().to_owned(), tcp: parts.next().unwrap().parse().unwrap(), udp: parts.next().unwrap().parse().unwrap(), http: parts.next().unwrap().parse().unwrap(), socks5: parts.next().unwrap().parse().unwrap(), datacenter: parts.next().unwrap().parse().unwrap(), residential: parts.next().unwrap().parse().unwrap(), mobile: parts.next().unwrap().parse().unwrap(), pool: match parts.next().unwrap() { "" => None, s => Some(s.into()), }, country: parts.next().unwrap().into(), } } } const RAW_PROXIES_CSV: &str = 
include_str!("fake_proxies.csv"); impl ProxyDB for InMemProxyDB { fn create(n: usize) -> Self { let mut db = Self::with_capacity(n); for line in RAW_PROXIES_CSV.lines().take(n) { db.append(Proxy::from(line.to_owned())).unwrap(); } db } fn get(&self, id: u64) -> Option<Cow<'_, Proxy>> { self.get_by_id(&id).map(Cow::Borrowed) } fn any_tcp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let mut query = self.query(); query.tcp(true).pool(pool).country(country); query.execute().map(|result| { let proxy_ref = result.any(); Cow::Borrowed(proxy_ref) }) } fn any_socks5_isp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let mut query = self.query(); query .socks5(true) .datacenter(true) .residential(true) .pool(pool) .country(country); query.execute().map(|result| { let proxy_ref = result.any(); Cow::Borrowed(proxy_ref) }) } } #[derive(Debug)] pub(super) struct NaiveProxyDB { proxies: Vec<Proxy>, } impl ProxyDB for NaiveProxyDB { fn create(n: usize) -> Self { let proxies = RAW_PROXIES_CSV .lines() .take(n) .map(|line| Proxy::from(line.to_owned())) .collect(); Self { proxies } } fn get(&self, id: u64) -> Option<Cow<'_, Proxy>> { self.proxies.iter().find(|p| p.id == id).map(Cow::Borrowed) } fn any_tcp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let found_proxies: Vec<_> = self .proxies .iter() .filter(|p| p.tcp && p.pool == Some(pool.into()) && p.country == country.into()) .collect(); if found_proxies.is_empty() { None } else { use rand::Rng; let index = rand::rng().random_range(0..found_proxies.len()); Some(Cow::Borrowed(found_proxies[index])) } } fn any_socks5_isp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let found_proxies: Vec<_> = self .proxies .iter() .filter(|p| { p.socks5 && p.datacenter && p.residential && p.pool == Some(pool.into()) && p.country == country.into() }) .collect(); if found_proxies.is_empty() { None } else { use rand::Rng; let index = rand::rng().random_range(0..found_proxies.len()); 
Some(Cow::Borrowed(found_proxies[index])) } } } #[non_exhaustive] pub(super) struct SqlLiteProxyDB { conn: sqlite::Connection, } impl std::fmt::Debug for SqlLiteProxyDB { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("SqlLiteProxyDB").finish() } } impl ProxyDB for SqlLiteProxyDB { fn create(n: usize) -> Self { let conn = sqlite::open(":memory:").unwrap(); // create the DB conn.execute( "CREATE TABLE proxies ( id INTEGER PRIMARY KEY, address TEXT NOT NULL, username TEXT NOT NULL, password TEXT NOT NULL, tcp BOOLEAN NOT NULL, udp BOOLEAN NOT NULL, http BOOLEAN NOT NULL, socks5 BOOLEAN NOT NULL, datacenter BOOLEAN NOT NULL, residential BOOLEAN NOT NULL, mobile BOOLEAN NOT NULL, pool TEXT, country TEXT NOT NULL )", ) .unwrap(); // insert the rows for line in RAW_PROXIES_CSV.lines().take(n) { let proxy = Proxy::from(line.to_owned()); let statement = format!( "INSERT INTO proxies (id, address, username, password, tcp, udp, http, socks5, datacenter, residential, mobile, pool, country) VALUES ({}, '{}', '{}', '{}', {}, {}, {}, {}, {}, {}, {}, {}, '{}')", proxy.id, proxy.address, proxy.username, proxy.password, proxy.tcp as i32, proxy.udp as i32, proxy.http as i32, proxy.socks5 as i32, proxy.datacenter as i32, proxy.residential as i32, proxy.mobile as i32, proxy.pool.map_or("NULL".to_owned(), |s| format!("'{}'", s.deref())), proxy.country.0, ); conn.execute(&statement).unwrap(); } Self { conn } } fn get(&self, id: u64) -> Option<Cow<'_, Proxy>> { let statement = format!("SELECT * FROM proxies WHERE id = {} LIMIT 1", id); let row = self .conn .prepare(&statement) .unwrap() .into_iter() .next()? 
.ok()?; let proxy = proxy_from_sql_row(&row); Some(Cow::Owned(proxy)) } fn any_tcp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let statement = format!( "SELECT * FROM proxies WHERE tcp = 1 AND pool = '{}' AND country = '{}' ORDER BY RANDOM() LIMIT 1", NormalizedString::from(pool).0, NormalizedString::from(country).0 ); let row = self .conn .prepare(&statement) .unwrap() .into_iter() .next()? .ok()?; let proxy = proxy_from_sql_row(&row); Some(Cow::Owned(proxy)) } fn any_socks5_isp(&self, pool: &str, country: &str) -> Option<Cow<'_, Proxy>> { let statement = format!( "SELECT * FROM proxies WHERE socks5 = 1 AND datacenter = 1 AND residential = 1 AND pool = '{}' AND country = '{}' ORDER BY RANDOM() LIMIT 1", NormalizedString::from(pool).0, NormalizedString::from(country).0 ); let row = self .conn .prepare(&statement) .unwrap() .into_iter() .next()? .ok()?; let proxy = proxy_from_sql_row(&row); Some(Cow::Owned(proxy)) } } fn proxy_from_sql_row(row: &Row) -> Proxy { Proxy { id: row.read::<i64, _>("id") as u64, address: row.read::<&str, _>("address").to_owned(), username: row.read::<&str, _>("username").to_owned(), password: row.read::<&str, _>("password").to_owned(), tcp: row.read::<i64, _>("tcp") != 0, udp: row.read::<i64, _>("udp") != 0, http: row.read::<i64, _>("http") != 0, socks5: row.read::<i64, _>("socks5") != 0, datacenter: row.read::<i64, _>("datacenter") != 0, residential: row.read::<i64, _>("residential") != 0, mobile: row.read::<i64, _>("mobile") != 0, pool: row.try_read::<&str, _>("pool").ok().map(Into::into), country: row.read::<&str, _>("country").into(), } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-macros/src/errors.rs
venndb-macros/src/errors.rs
#![allow(dead_code)] use { proc_macro2::{Span, TokenStream}, quote::ToTokens, std::cell::RefCell, }; /// Produce functions to expect particular literals in `syn::Expr` macro_rules! expect_lit_fn { ($(($fn_name:ident, $syn_type:ident, $variant:ident, $lit_name:literal),)*) => { $( pub fn $fn_name<'a>(&self, e: &'a syn::Expr) -> Option<&'a syn::$syn_type> { if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::$variant(inner), .. }) = e { Some(inner) } else { self.unexpected_lit($lit_name, e); None } } )* } } /// Produce functions to expect particular variants of `syn::Meta` macro_rules! expect_meta_fn { ($(($fn_name:ident, $syn_type:ident, $variant:ident, $meta_name:literal),)*) => { $( pub fn $fn_name<'a>(&self, meta: &'a syn::Meta) -> Option<&'a syn::$syn_type> { if let syn::Meta::$variant(inner) = meta { Some(inner) } else { self.unexpected_meta($meta_name, meta); None } } )* } } /// A type for collecting procedural macro errors. #[derive(Default)] pub struct Errors { errors: RefCell<Vec<syn::Error>>, } impl Errors { expect_lit_fn![ (expect_lit_str, LitStr, Str, "string"), (expect_lit_char, LitChar, Char, "character"), (expect_lit_int, LitInt, Int, "integer"), ]; expect_meta_fn![ (expect_meta_word, Path, Path, "path"), (expect_meta_list, MetaList, List, "list"), ( expect_meta_name_value, MetaNameValue, NameValue, "name-value pair" ), ]; pub fn expect_path<'a>(&self, e: &'a syn::Expr) -> Option<&'a syn::Path> { if let syn::Expr::Path(path) = e { Some(&path.path) } else { self.unexpected_value("path", e); None } } fn unexpected_lit(&self, expected: &str, found: &syn::Expr) { fn lit_kind(lit: &syn::Lit) -> &'static str { use syn::Lit::{Bool, Byte, ByteStr, Char, Float, Int, Str, Verbatim}; match lit { Str(_) => "string", ByteStr(_) => "bytestring", Byte(_) => "byte", Char(_) => "character", Int(_) => "integer", Float(_) => "float", Bool(_) => "boolean", Verbatim(_) => "unknown (possibly extra-large integer)", _ => "unknown literal kind", } } if let 
syn::Expr::Lit(syn::ExprLit { lit, .. }) = found { self.err( found, &[ "Expected ", expected, " literal, found ", lit_kind(lit), " literal", ] .concat(), ) } else { self.err( found, &[ "Expected ", expected, " literal, found non-literal expression.", ] .concat(), ) } } fn unexpected_meta(&self, expected: &str, found: &syn::Meta) { fn meta_kind(meta: &syn::Meta) -> &'static str { use syn::Meta::{List, NameValue, Path}; match meta { Path(_) => "path", List(_) => "list", NameValue(_) => "name-value pair", } } self.err( found, &[ "Expected ", expected, " attribute, found ", meta_kind(found), " attribute", ] .concat(), ) } fn unexpected_value(&self, expected: &str, found: &syn::Expr) { fn expr_kind(expr: &syn::Expr) -> &'static str { use syn::Expr::{ Array, Assign, Async, Await, Binary, Block, Break, Call, Cast, Closure, Const, Continue, Field, ForLoop, Group, If, Index, Infer, Let, Lit, Loop, Macro, Match, MethodCall, Paren, Path, Range, Reference, Repeat, Return, Struct, Try, TryBlock, Tuple, Unary, Unsafe, Verbatim, While, Yield, }; match expr { Array(_) => "array", Assign(_) => "assignment", Async(_) => "async block", Await(_) => "await", Binary(_) => "binary operation", Block(_) => "block", Break(_) => "break", Call(_) => "function call", Cast(_) => "cast", Closure(_) => "closure", Const(_) => "const", Continue(_) => "continue", Field(_) => "field access", ForLoop(_) => "for loop", Group(_) => "group", If(_) => "if", Index(_) => "index", Infer(_) => "inferred type", Let(_) => "let", Lit(_) => "literal", Loop(_) => "loop", Macro(_) => "macro", Match(_) => "match", MethodCall(_) => "method call", Paren(_) => "parentheses", Path(_) => "path", Range(_) => "range", Reference(_) => "reference", Repeat(_) => "repeat", Return(_) => "return", Struct(_) => "struct", Try(_) => "try", TryBlock(_) => "try block", Tuple(_) => "tuple", Unary(_) => "unary operation", Unsafe(_) => "unsafe block", Verbatim(_) => "verbatim", While(_) => "while", Yield(_) => "yield", _ => "unknown 
expression kind", } } self.err( found, &[ "Expected ", expected, " attribute, found ", found.to_token_stream().to_string().as_str(), " attribute (", expr_kind(found), ")", ] .concat(), ) } /// Issue an error relating to a particular `Spanned` structure. pub fn err(&self, spanned: &impl syn::spanned::Spanned, msg: &str) { self.err_span(spanned.span(), msg); } /// Issue an error relating to a particular `Span`. pub fn err_span(&self, span: Span, msg: &str) { self.push(syn::Error::new(span, msg)); } /// Issue an error spanning over the given syntax tree node. pub fn err_span_tokens<T: ToTokens>(&self, tokens: T, msg: &str) { self.push(syn::Error::new_spanned(tokens, msg)); } /// Push a `syn::Error` onto the list of errors to issue. pub fn push(&self, err: syn::Error) { self.errors.borrow_mut().push(err); } /// Convert a `syn::Result` to an `Option`, logging the error if present. pub fn ok<T>(&self, r: syn::Result<T>) -> Option<T> { match r { Ok(v) => Some(v), Err(e) => { self.push(e); None } } } } impl ToTokens for Errors { /// Convert the errors into tokens that, when emit, will cause /// the user of the macro to receive compiler errors. fn to_tokens(&self, tokens: &mut TokenStream) { tokens.extend(self.errors.borrow().iter().map(|e| e.to_compile_error())); } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-macros/src/field.rs
venndb-macros/src/field.rs
//! Struct Field Info use crate::{ errors::Errors, parse_attrs::{FieldAttrs, FieldKind}, }; use quote::format_ident; use syn::Ident; /// A field of a `#![derive(VennDB)]` struct with attributes and some other /// notable metadata appended. pub struct StructField<'a> { /// The original parsed field field: &'a syn::Field, /// The parsed attributes of the field attrs: FieldAttrs<'a>, /// The field name. This is contained optionally inside `field`, /// but is duplicated non-optionally here to indicate that all field that /// have reached this point must have a field name, and it no longer /// needs to be unwrapped. name: &'a syn::Ident, } pub enum FieldInfo<'a> { Key(KeyField<'a>), Filter(FilterField<'a>), FilterMap(FilterMapField<'a>), } pub struct KeyField<'a> { pub name: &'a Ident, pub ty: &'a syn::Type, } impl<'a> KeyField<'a> { pub fn name(&'a self) -> &'a Ident { self.name } pub fn ty(&'a self) -> &'a syn::Type { self.ty } pub fn method_name(&self) -> Ident { format_ident!("get_by_{}", self.name) } pub fn map_name(&self) -> Ident { format_ident!("map_{}", self.name) } } pub struct FilterField<'a> { pub name: &'a Ident, pub optional: bool, } impl<'a> FilterField<'a> { pub fn name(&'a self) -> &'a Ident { self.name } pub fn filter_name(&self) -> Ident { format_ident!("filter_{}", self.name) } pub fn filter_not_name(&self) -> Ident { format_ident!("filter_not_{}", self.name) } } impl<'a> StructField<'a> { /// Attempts to parse a field of a `#[derive(VennDB)]` struct, pulling out the /// fields required for code generation. pub fn new(_errors: &Errors, field: &'a syn::Field, attrs: FieldAttrs<'a>) -> Option<Self> { let name = field.ident.as_ref().expect("missing ident for named field"); Some(StructField { field, attrs, name }) } /// Return the method name for this struct field. 
pub fn info(&self) -> Option<FieldInfo<'_>> { self.attrs.kind.as_ref().map(|kind| match kind { FieldKind::Key => FieldInfo::Key(KeyField { name: self.name, ty: self.attrs.option_ty.unwrap_or(&self.field.ty), }), FieldKind::Filter => FieldInfo::Filter(FilterField { name: self.name, optional: self.attrs.option_ty.is_some(), }), FieldKind::FilterMap { any } => FieldInfo::FilterMap(FilterMapField { name: self.name, ty: self.attrs.option_ty.unwrap_or(&self.field.ty), optional: self.attrs.option_ty.is_some(), any: *any, }), }) } } pub struct FilterMapField<'a> { pub name: &'a Ident, pub ty: &'a syn::Type, pub optional: bool, pub any: bool, } impl<'a> FilterMapField<'a> { pub fn name(&'a self) -> &'a Ident { self.name } pub fn ty(&'a self) -> &'a syn::Type { self.ty } pub fn filter_map_name(&self) -> Ident { format_ident!("filter_map_{}", self.name) } pub fn filter_vec_name(&self) -> Ident { format_ident!("filter_vec_{}", self.name) } pub fn filter_any_name(&self) -> Option<Ident> { if self.any { Some(format_ident!("filter_any_{}", self.name)) } else { None } } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-macros/src/lib.rs
venndb-macros/src/lib.rs
#![forbid(unsafe_code)] mod errors; mod field; mod generate_db; mod parse_attrs; use errors::Errors; use field::StructField; use parse_attrs::{FieldAttrs, TypeAttrs}; use proc_macro2::TokenStream; use quote::{ToTokens, format_ident, quote}; /// Derive macro generating VennDB functionality for this struct. /// /// See <https://docs.rs/venndb> for more information on how to use it. /// Or check out the README and usage tests in [the repository][repo] of this macro. /// /// [repo]: https://github.com/plabayo/venndb #[proc_macro_derive(VennDB, attributes(venndb))] pub fn venndb(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let ast = syn::parse_macro_input!(input as syn::DeriveInput); let ts: TokenStream = impl_from_args(&ast); ts.into() } /// Transform the input into a token stream containing any generated implementations, /// as well as all errors that occurred. fn impl_from_args(input: &syn::DeriveInput) -> TokenStream { let errors = &Errors::default(); let type_attrs = &TypeAttrs::parse(errors, input); let mut output_tokens = match &input.data { syn::Data::Struct(ds) => impl_from_args_struct( errors, &input.vis, &input.ident, type_attrs, &input.generics, ds, ), syn::Data::Enum(_) => { errors.err(input, "`#[derive(VennDB)]` cannot be applied to enums"); TokenStream::new() } syn::Data::Union(_) => { errors.err(input, "`#[derive(VennDB)]` cannot be applied to unions"); TokenStream::new() } }; errors.to_tokens(&mut output_tokens); output_tokens } /// Implements `VennDB` for a `#[derive(VennDB)]` struct. 
fn impl_from_args_struct( errors: &Errors, vis: &syn::Visibility, name: &syn::Ident, type_attrs: &TypeAttrs, _generic_args: &syn::Generics, ds: &syn::DataStruct, ) -> TokenStream { let fields = match &ds.fields { syn::Fields::Named(fields) => fields, syn::Fields::Unnamed(_) => { errors.err( &ds.struct_token, "`#![derive(VennDB)]` is not currently supported on tuple structs", ); return TokenStream::new(); } syn::Fields::Unit => { errors.err( &ds.struct_token, "#![derive(VennDB)]` cannot be applied to unit structs", ); return TokenStream::new(); } }; let fields: Vec<_> = fields .named .iter() .filter_map(|field| { let attrs = FieldAttrs::parse(errors, field); StructField::new(errors, field, attrs) }) .collect(); let name_db = match &type_attrs.name { Some(name) => format_ident!("{}", name.value()), None => format_ident!("{}DB", name), }; let db_code = generate_db::generate_db( name, &name_db, type_attrs.validator.as_ref(), vis, &fields[..], ); quote! { #db_code } }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-macros/src/generate_db.rs
venndb-macros/src/generate_db.rs
use crate::field::{FieldInfo, StructField}; use proc_macro2::TokenStream; use quote::{ToTokens, format_ident, quote}; use syn::{Ident, Path}; /// Generate the venndb logic pub fn generate_db( name: &Ident, name_db: &Ident, validator: Option<&Path>, vis: &syn::Visibility, fields: &[StructField], ) -> TokenStream { let fields: Vec<_> = fields.iter().filter_map(StructField::info).collect(); let db_error = DbError::new(validator, &fields[..]); let db_struct = generate_db_struct(name, name_db, vis, &fields[..]); let db_struct_methods = generate_db_struct_methods(name, name_db, validator, vis, &db_error, &fields[..]); let db_query = generate_query_struct(name, name_db, vis, &fields[..]); let db_error_definitions = db_error.generate_definitions(name_db, vis); quote! { #db_struct #db_struct_methods #db_query #db_error_definitions } } fn generate_db_struct( name: &Ident, name_db: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let db_fields: Vec<_> = fields .iter() .map(|info| match info { FieldInfo::Key(field) => { let field_name = field.map_name(); let ty: &syn::Type = field.ty(); quote! { #field_name: ::venndb::__internal::HashMap<#ty, usize>, } } FieldInfo::Filter(field) => { let field_name = field.filter_name(); let field_name_not = field.filter_not_name(); quote! { #field_name: ::venndb::__internal::BitVec, #field_name_not: ::venndb::__internal::BitVec, } } FieldInfo::FilterMap(field) => { let filter_map_name = field.filter_map_name(); let filter_vec_name = field.filter_vec_name(); let filter_any = match field.filter_any_name() { Some(name) => quote! { #name: ::venndb::__internal::BitVec, }, None => quote! {}, }; let ty: &syn::Type = field.ty(); quote! { #filter_map_name: ::venndb::__internal::HashMap<#ty, usize>, #filter_vec_name: ::std::vec::Vec<::venndb::__internal::BitVec>, #filter_any } } }) .collect(); let db_doc = format!( "An in-memory database for storing instances of [`{}`], generated by `#[derive(VennDB)]`.", name ); quote! 
{ #[doc=#db_doc] #[derive(Debug, Default)] #vis struct #name_db { rows: Vec<#name>, #(#db_fields)* } } } fn generate_db_struct_methods( name: &Ident, name_db: &Ident, validator: Option<&Path>, vis: &syn::Visibility, db_error: &DbError, fields: &[FieldInfo], ) -> TokenStream { let method_new = generate_db_struct_method_new(name, name_db, vis, fields); let method_with_capacity = generate_db_struct_method_with_capacity(name, name_db, vis, fields); let method_from_rows = generate_db_struct_method_from_rows(name, name_db, vis, db_error, fields); let field_methods = generate_db_struct_field_methods(name, name_db, vis, fields); let method_append = generate_db_struct_method_append(name, name_db, validator, vis, db_error, fields); quote! { #[allow(clippy::unused_unit)] impl #name_db { #method_new #method_with_capacity #method_from_rows /// Return the number of rows in the database. #vis fn len(&self) -> usize { self.rows.len() } /// Return the capacity of the database, /// which is automatically grown as needed. #vis fn capacity(&self) -> usize { self.rows.capacity() } /// Return `true` if the database is empty. #vis fn is_empty(&self) -> bool { self.rows.is_empty() } /// Return an iterator over the rows in the database. #vis fn iter(&self) -> impl ::std::iter::Iterator<Item = &#name> { self.rows.iter() } #field_methods #method_append /// Consumes the database and returns the rows. #vis fn into_rows(self) -> ::std::vec::Vec<#name> { self.rows } } } } fn generate_db_struct_method_new( name: &Ident, _name_db: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let method_doc = format!( "Construct a new empty database for storing instances of [`{}`].", name ); let db_fields_initialisers: Vec<_> = fields .iter() .map(|info| match info { FieldInfo::Key(field) => { let name = field.map_name(); quote! { #name: ::venndb::__internal::HashMap::new(), } } FieldInfo::Filter(field) => { let name = field.filter_name(); let name_not = field.filter_not_name(); quote! 
{ #name: ::venndb::__internal::BitVec::new(), #name_not: ::venndb::__internal::BitVec::new(), } } FieldInfo::FilterMap(field) => { let filter_map_name = field.filter_map_name(); let filter_vec_name = field.filter_vec_name(); let filter_any = match field.filter_any_name() { Some(name) => quote! { #name: ::venndb::__internal::BitVec::new(), }, None => quote! {}, }; quote! { #filter_map_name: ::venndb::__internal::HashMap::new(), #filter_vec_name: ::std::vec::Vec::new(), #filter_any } } }) .collect(); quote! { #[doc=#method_doc] #vis fn new() -> Self { Self { rows: Vec::new(), #(#db_fields_initialisers)* } } } } fn generate_db_struct_method_with_capacity( name: &Ident, _name_db: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let method_doc = format!( "Construct a new empty database for storing instances of [`{}`] with a given capacity.", name ); let db_fields_initialisers_with_capacity: Vec<_> = fields .iter() .map(|info| match info { FieldInfo::Key(field) => { let name = field.map_name(); quote! { #name: ::venndb::__internal::HashMap::with_capacity(capacity), } } FieldInfo::Filter(field) => { let name = field.filter_name(); let name_not = field.filter_not_name(); quote! { #name: ::venndb::__internal::BitVec::with_capacity(capacity), #name_not: ::venndb::__internal::BitVec::with_capacity(capacity), } } FieldInfo::FilterMap(field) => { let filter_map_name = field.filter_map_name(); let filter_vec_name = field.filter_vec_name(); let filter_any = match field.filter_any_name() { Some(name) => quote! { #name: ::venndb::__internal::BitVec::with_capacity(capacity), }, None => quote! {}, }; quote! { #filter_map_name: ::venndb::__internal::HashMap::with_capacity(capacity), #filter_vec_name: ::std::vec::Vec::with_capacity(capacity), #filter_any } } }) .collect(); quote! 
{ #[doc=#method_doc] #vis fn with_capacity(capacity: usize) -> Self { Self { rows: Vec::new(), #(#db_fields_initialisers_with_capacity)* } } } } fn generate_db_struct_method_from_rows( name: &Ident, name_db: &Ident, vis: &syn::Visibility, db_error: &DbError, _fields: &[FieldInfo], ) -> TokenStream { let method_doc = format!( "Construct a new database from the given set of [`{}`] rows.", name ); let method_iter_doc = format!( "Construct a new database from the given iterator of items that can be turned into [`{}`] instances.", name ); let return_type = db_error.generate_fn_output(name_db, quote! { ::std::vec::Vec<#name> }, quote! { Self }); let append_internal_call = db_error.generate_fn_error_kind_usage( name_db, quote! { db.append_internal(row, index) }, quote! { rows }, ); let fn_result = db_error.generate_fn_return_value_ok(quote! { db }); quote! { #[doc=#method_doc] #vis fn from_rows(rows: ::std::vec::Vec<#name>) -> #return_type { let mut db = Self::with_capacity(rows.len()); for (index, row) in rows.iter().enumerate() { #append_internal_call } db.rows = rows; #fn_result } #[doc=#method_iter_doc] #vis fn from_iter<I, Item>(iter: I) -> #return_type where I: ::std::iter::IntoIterator<Item = Item>, Item: ::std::convert::Into<#name>, { let rows: ::std::vec::Vec<#name> = iter.into_iter().map(::std::convert::Into::into).collect(); Self::from_rows(rows) } } } fn generate_db_struct_method_append( name: &Ident, name_db: &Ident, validator: Option<&Path>, vis: &syn::Visibility, db_error: &DbError, fields: &[FieldInfo], ) -> TokenStream { let method_doc = format!("Append a new instance of [`{}`] to the database.", name); let method_iter_doc = format!( "Extend the database with the given iterator of items that can be turned into [`{}`] instances.", name ); let validator_check = match validator { Some(validator) => { let err = DbError::generate_invalid_row_error_kind_creation(name_db); quote! { if !#validator(&data) { return Err(#err); } } } None => quote! 
{}, }; let db_field_insert_checks: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Key(field) => { let map_name = field.map_name(); let field_name = field.name(); let entry_field_name = format_ident!("entry_{}", field_name); let db_duplicate_error_kind_creation = DbError::generate_duplicate_key_error_kind_creation( name_db, ); Some(quote! { let #entry_field_name = match self.#map_name.entry(data.#field_name.clone()) { ::venndb::__internal::hash_map::Entry::Occupied(_) => return Err(#db_duplicate_error_kind_creation), ::venndb::__internal::hash_map::Entry::Vacant(entry) => entry, }; }) } FieldInfo::Filter(_) => None, FieldInfo::FilterMap(_) => None, }) .collect(); let db_field_insert_commits: Vec<_> = fields .iter() .map(|info| match info { FieldInfo::Key(field) => { let field_name = field.name(); let entry_field_name = format_ident!("entry_{}", field_name); quote! { #entry_field_name.insert(index); } } FieldInfo::Filter(field) => { let name = field.name(); let field_name = field.filter_name(); let field_name_not = field.filter_not_name(); if field.optional { quote! { match data.#name { Some(value) => { self.#field_name.push(value); self.#field_name_not.push(!value); } None => { self.#field_name.push(false); self.#field_name_not.push(false); } } } } else { quote! { self.#field_name.push(data.#name); self.#field_name_not.push(!data.#name); } } } FieldInfo::FilterMap(field) => { let name = field.name(); let filter_map_name = field.filter_map_name(); let filter_vec_name = field.filter_vec_name(); let filter_index = format_ident!("{}_index", filter_vec_name); let filter_any_backfill = match field.filter_any_name() { Some(name) => quote! { let bv = &bv | &self.#name; }, None => quote! {}, }; let filter_any_register = match field.filter_any_name() { Some(any_vec) => if field.optional { quote! { self.#any_vec.push(data.#name.as_ref().map(::venndb::Any::is_any).unwrap_or_default()); } } else { quote! 
{ self.#any_vec.push(::venndb::Any::is_any(&data.#name)); } }, None => quote! {}, }; let register_rows = if field.optional { quote! { for (i, row) in self.#filter_vec_name.iter_mut().enumerate() { row.push(Some(i) == #filter_index); } } } else { quote! { for (i, row) in self.#filter_vec_name.iter_mut().enumerate() { row.push(i == #filter_index); } } }; let is_any_value = if field.optional { quote! { data.#name.as_ref().map(|v| ::venndb::Any::is_any(v)).unwrap_or_default() } } else { quote! { ::venndb::Any::is_any(&data.#name) } }; let register_rows = if field.any { quote! { if #is_any_value { for row in self.#filter_vec_name.iter_mut() { row.push(true); } } else { #register_rows } } } else { register_rows }; if field.optional { quote! { #filter_any_register let #filter_index = match data.#name.clone() { Some(value) => { Some(match self.#filter_map_name.entry(value) { ::venndb::__internal::hash_map::Entry::Occupied(entry) => *entry.get(), ::venndb::__internal::hash_map::Entry::Vacant(entry) => { let vec_index = self.#filter_vec_name.len(); entry.insert(vec_index); let bv = ::venndb::__internal::BitVec::repeat(false, index); #filter_any_backfill self.#filter_vec_name.push(bv); vec_index } }) }, None => None, }; #register_rows } } else { quote! { #filter_any_register let #filter_index = match self.#filter_map_name.entry(data.#name.clone()) { ::venndb::__internal::hash_map::Entry::Occupied(entry) => *entry.get(), ::venndb::__internal::hash_map::Entry::Vacant(entry) => { let vec_index = self.#filter_vec_name.len(); entry.insert(vec_index); let bv = ::venndb::__internal::BitVec::repeat(false, index); #filter_any_backfill self.#filter_vec_name.push(bv); vec_index } }; #register_rows } } } }) .collect(); let append_return_type = db_error.generate_fn_output(name_db, quote! { #name }, quote! { () }); let extend_return_type = db_error.generate_fn_output(name_db, quote! { (#name, I::IntoIter) }, quote! 
{ () }); let append_kind_return_type = db_error.generate_fn_kind_output(name_db, quote! { () }); let append_internal_call = db_error.generate_fn_error_kind_usage( name_db, quote! { self.append_internal(&data, index) }, quote! { data }, ); let extend_append_internal_call = db_error.generate_fn_error_kind_usage( name_db, quote! { self.append_internal(&data, index) }, quote! { (data, iter) }, ); let append_return_output = db_error.generate_fn_return_value_ok(quote! { () }); quote! { #[doc=#method_doc] #vis fn append(&mut self, data: impl ::std::convert::Into<#name>) -> #append_return_type { let index = self.rows.len(); let data = data.into(); #append_internal_call self.rows.push(data); #append_return_output } #[doc=#method_iter_doc] #vis fn extend<I, Item>(&mut self, iter: I) -> #extend_return_type where I: ::std::iter::IntoIterator<Item = Item>, Item: ::std::convert::Into<#name>, { let mut index = self.rows.len(); let mut iter = iter.into_iter(); for item in &mut iter { let data = item.into(); #extend_append_internal_call self.rows.push(data); index += 1; } #append_return_output } fn append_internal(&mut self, data: &#name, index: usize) -> #append_kind_return_type { #validator_check #(#db_field_insert_checks)* #(#db_field_insert_commits)* #append_return_output } } } fn generate_db_struct_field_methods( name: &Ident, _name_db: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let db_key_methods: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Key(field) => { let map_name = field.map_name(); let ty = field.ty(); let method_name = field.method_name(); let doc = format!( "Get an instance of [`{}`] by its key `{}`, if it exists in the database.", name, field.name() ); Some(quote! 
{ #[doc=#doc] #vis fn #method_name<Q>(&self, key: &Q) -> ::std::option::Option<&#name> where #ty: ::std::borrow::Borrow<Q>, Q: ::std::hash::Hash + ::std::cmp::Eq + ?::std::marker::Sized, { self.#map_name.get(key).and_then(|index| self.rows.get(*index)) } }) } FieldInfo::Filter(_) => None, FieldInfo::FilterMap(_) => None, }) .collect(); quote! { #(#db_key_methods)* } } fn generate_query_struct( name: &Ident, name_db: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let name_query = format_ident!("{}Query", name_db); let query_fields: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Filter(field) => { let name = field.name(); Some(quote! { #name: Option<bool>, }) } FieldInfo::FilterMap(field) => { let name = field.name(); let ty = field.ty(); Some(quote! { #name: Vec<#ty>, }) } FieldInfo::Key(_) => None, }) .collect(); if query_fields.is_empty() { return TokenStream::new(); } let query_field_initialisers: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Filter(field) => { let name = field.name(); Some(quote! { #name: None, }) } FieldInfo::FilterMap(field) => { let name = field.name(); Some(quote! { #name: Vec::new(), }) } FieldInfo::Key(_) => None, }) .collect(); let query_impl = generate_query_struct_impl(name, name_db, &name_query, vis, fields); let query_doc = format!( "A query object for filtering instances of [`{}`], within [`{}`], generated by `#[derive(VennDB)]`.", name, name_db ); let query_method_doc = format!( "Return a new [`{}`] for filtering instances of [`{}`].", name_query, name ); quote! 
{ #[doc=#query_doc] #[derive(Debug)] #vis struct #name_query<'a> { db: &'a #name_db, #(#query_fields)* } impl<'a> #name_query<'a> { fn new(db: &'a #name_db) -> Self { Self { db, #(#query_field_initialisers)* } } } #query_impl impl #name_db { #[doc=#query_method_doc] #vis fn query(&self) -> #name_query { #name_query::new(&self) } } } } fn generate_query_struct_impl( name: &Ident, _name_db: &Ident, name_query: &Ident, vis: &syn::Visibility, fields: &[FieldInfo], ) -> TokenStream { let filter_setters: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Filter(field) => { let name = field.name(); let doc = format!("Enable and set the `{}` filter.", name); Some(quote! { #[doc=#doc] #vis fn #name(&mut self, value: bool) -> &mut Self { self.#name = Some(value); self } }) } FieldInfo::FilterMap(field) => { let name = field.name(); let ty = field.ty(); let doc = format!( "Enable and set the `{}` filter-map with the given option.", name ); Some(quote! { #[doc=#doc] #vis fn #name(&mut self, value: impl::std::convert::Into<#ty>) -> &mut Self { self.#name.push(value.into()); self } }) } FieldInfo::Key(_) => None, }) .collect(); let filter_resetters: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Filter(field) => { let name = field.name(); Some(quote! { self.#name = None; }) } FieldInfo::FilterMap(field) => { let name = field.name(); Some(quote! { self.#name.clear(); }) } FieldInfo::Key(_) => None, }) .collect(); let filters: Vec<_> = fields .iter() .filter_map(|info| match info { FieldInfo::Filter(field) => { let name = field.name(); let filter_name: Ident = field.filter_name(); let filter_not_name: Ident = field.filter_not_name(); Some(quote! { // Filter by the filter below. Only if it is defined as Some(_). // Using negation if negation is desired, and // the regular filter otherwise. 
match self.#name { Some(true) => filter &= &self.db.#filter_name, Some(false) => filter &= &self.db.#filter_not_name, None => (), }; }) } FieldInfo::FilterMap(field) => { let name = field.name(); let filter_map_name: Ident = field.filter_map_name(); let filter_vec_name: Ident = field.filter_vec_name(); // used if only one value is set, making it more efficient let value_filter_one = match field.filter_any_name() { Some(filter_any_vec) => quote! { if ::venndb::Any::is_any(&value) { filter &= &self.db.#filter_any_vec; } else { match self.db.#filter_map_name.get(value) { Some(index) => filter &= &self.db.#filter_vec_name[*index], None => filter &= &self.db.#filter_any_vec, }; } }, None => quote! { match self.db.#filter_map_name.get(value) { Some(index) => filter &= &self.db.#filter_vec_name[*index], None => return None, }; }, }; // used if multiple values are set, requires an extra alloc let value_filter_multi = match field.filter_any_name() { Some(filter_any_vec) => quote! { if ::venndb::Any::is_any(&value) { inter_filter |= &self.db.#filter_any_vec; } else { match self.db.#filter_map_name.get(value) { Some(index) => inter_filter |= &self.db.#filter_vec_name[*index], None => inter_filter |= &self.db.#filter_any_vec, }; } }, None => quote! { match self.db.#filter_map_name.get(value) { Some(index) => inter_filter |= &self.db.#filter_vec_name[*index], None => return None, }; }, }; // apply the filter Some(quote! { // Filter by the filterm ap below, only if it is defined as Some(_). // If there is no filter matched to the given value then the search is over, // and we early return None. 
match &self.#name.len() { 0 => (), 1 => { let value = &self.#name[self.#name.len() - 1]; #value_filter_one } _ => { let mut inter_filter = ::venndb::__internal::BitVec::repeat(false, self.db.rows.len()); for value in &self.#name { #value_filter_multi } filter &= &inter_filter; } } }) } FieldInfo::Key(_) => None, }) .collect(); let name_query_result = format_ident!("{}Result", name_query); let name_query_result_doc = format!( "Contains a reference to the found instances of [`{}`] if there is at least one found, queried using [`{}`], generated by `#[derive(VennDB)]`.", name, name_query ); let name_query_result_kind = format_ident!("{}Kind", name_query_result); let name_query_result_iter = format_ident!("{}Iter", name_query_result); let name_query_result_iter_kind = format_ident!("{}Kind", name_query_result_iter); let name_query_result_iter_doc = format!( "An iterator over the found instances of [`{}`] queried using [`{}`], generated by `#[derive(VennDB)]`.", name, name_query ); let query_result_method_doc_first = format!( "Return the first instance of [`{}`] found by the query.", name ); let query_result_method_doc_any = format!( "Return a random instance of [`{}`] found by the query.", name ); let query_result_method_doc_iter = format!( "Return an iterator over the instances of [`{}`] found by the query.", name ); quote! { impl<'a> #name_query<'a> { #(#filter_setters)* /// Reset the query to its initial values. #vis fn reset(&mut self) -> &mut Self { #(#filter_resetters)* self } /// Execute the query on the database, returning an iterator over the results. 
#vis fn execute(&self) -> Option<#name_query_result<'a>> { let mut filter = ::venndb::__internal::BitVec::repeat(true, self.db.rows.len()); #(#filters)* if filter.any() { Some(#name_query_result { rows: &self.db.rows, references: #name_query_result_kind::Bits(filter), }) } else { None } } } #[doc=#name_query_result_doc] #[derive(Debug)] #vis struct #name_query_result<'a> { rows: &'a [#name], references: #name_query_result_kind, } #[derive(Debug)] enum #name_query_result_kind { Bits(::venndb::__internal::BitVec), Indices(::std::vec::Vec<usize>), } impl<'a> #name_query_result<'a> { #[doc=#query_result_method_doc_first] #vis fn first(&self) -> &'a #name { let index = match &self.references { #name_query_result_kind::Bits(v) => v.iter_ones().next().unwrap(), #name_query_result_kind::Indices(i) => i[0], }; &self.rows[index] } #[doc=#query_result_method_doc_any] #vis fn any(&self) -> &'a #name { let index = match &self.references { #name_query_result_kind::Bits(v) => { let n = ::venndb::__internal::rand_range(v.count_ones()); v.iter_ones().nth(n).unwrap() } #name_query_result_kind::Indices(i) => { let n = ::venndb::__internal::rand_range(i.len()); i[n]
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
true
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-macros/src/parse_attrs.rs
venndb-macros/src/parse_attrs.rs
use quote::ToTokens; use crate::errors::Errors; /// Attributes applied to a field of a `#![derive(VennDB)]` struct. #[derive(Default)] pub struct FieldAttrs<'a> { pub kind: Option<FieldKind>, pub option_ty: Option<&'a syn::Type>, } pub enum FieldKind { Key, Filter, FilterMap { any: bool }, } impl<'a> FieldAttrs<'a> { pub fn parse(errors: &Errors, field: &'a syn::Field) -> Self { let mut this = Self::default(); let mut skipped = false; let mut is_key = false; let mut is_filter = false; let mut is_any = false; for attr in &field.attrs { let ml: Vec<_> = if let Some(ml) = venndb_attr_to_meta_list(errors, attr) { ml.into_iter().collect() } else { continue; }; if ml.iter().any(|meta| meta.path().is_ident("skip")) { // check first to avoid any other invalid combinations skipped = true; } else { for meta in ml { let name = meta.path(); if name.is_ident("key") { if is_filter { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Cannot have both `key` and `filter`", ), ); } else if is_any { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Cannot have both `key` and `any`", ), ); } else { is_key = true; } } else if name.is_ident("filter") { if is_key { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Cannot have both `key` and `filter`", ), ); } else { is_filter = true; } } else if name.is_ident("any") { if is_key { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Cannot have both `key` and `any`", ), ); } else { is_any = true; } } else { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Expected one of: `key`", ), ); } } } } this.option_ty = ty_inner(&["Option"], &field.ty); if skipped { this.kind = None; } else if is_key { if this.option_ty.is_some() { errors.err( &field.ty, concat!( "Invalid field-level `venndb` attribute\n", "`key` fields cannot be `Option`", ), ); } else { this.kind = Some(FieldKind::Key); } } else if 
is_bool(this.option_ty.unwrap_or(&field.ty)) { if is_any { errors.err( &field.ty, concat!( "Invalid field-level `venndb` attribute\n", "`any` cannot be used with `bool`", ), ); } else { this.kind = Some(FieldKind::Filter); } } else if is_filter { // bool filters are to be seen as regular filters, even when made explicitly so! this.kind = Some(FieldKind::FilterMap { any: is_any }); } else if is_any { errors.err( &field.ty, concat!( "Invalid field-level `venndb` attribute\n", "`any` can only be used with `filter`", ), ); } this } } fn is_bool(ty: &syn::Type) -> bool { if let syn::Type::Path(syn::TypePath { path, .. }) = ty { path.is_ident("bool") } else { if ty.to_token_stream().to_string().contains("bool") { panic!( "Expected bool, found {:?}", ty.to_token_stream().to_string() ); } false } } /// Represents a `#[derive(VennDB)]` type's top-level attributes. #[derive(Default)] pub struct TypeAttrs { pub name: Option<syn::LitStr>, pub validator: Option<syn::Path>, } impl TypeAttrs { /// Parse top-level `#[venndb(...)]` attributes pub fn parse(errors: &Errors, derive_input: &syn::DeriveInput) -> Self { let mut this = Self::default(); for attr in &derive_input.attrs { let ml = if let Some(ml) = venndb_attr_to_meta_list(errors, attr) { ml } else { continue; }; for meta in ml { let name = meta.path(); if name.is_ident("name") { if let Some(m) = errors.expect_meta_name_value(&meta) { this.name = errors.expect_lit_str(&m.value).cloned(); } } else if name.is_ident("validator") { if let Some(m) = errors.expect_meta_name_value(&meta) { this.validator = errors.expect_path(&m.value).cloned(); } } else { errors.err( &meta, concat!( "Invalid field-level `venndb` attribute\n", "Expected one of: `name`", ), ); } } } this } } /// Filters out non-`#[venndb(...)]` attributes and converts to a sequence of `syn::Meta`. 
fn venndb_attr_to_meta_list( errors: &Errors, attr: &syn::Attribute, ) -> Option<impl IntoIterator<Item = syn::Meta>> { if !is_venndb_attr(attr) { return None; } let ml = errors.expect_meta_list(&attr.meta)?; errors.ok(ml.parse_args_with( syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated, )) } // Whether the attribute is one like `#[<name> ...]` fn is_matching_attr(name: &str, attr: &syn::Attribute) -> bool { attr.path().segments.len() == 1 && attr.path().segments[0].ident == name } /// Checks for `#[venndb ...]` fn is_venndb_attr(attr: &syn::Attribute) -> bool { is_matching_attr("venndb", attr) } /// Returns `Some(T)` if a type is `wrapper_name<T>` for any `wrapper_name` in `wrapper_names`. fn ty_inner<'a>(wrapper_names: &[&str], ty: &'a syn::Type) -> Option<&'a syn::Type> { if let syn::Type::Path(path) = ty { if path.qself.is_some() { return None; } // Since we only check the last path segment, it isn't necessarily the case that // we're referring to `std::vec::Vec` or `std::option::Option`, but there isn't // a fool proof way to check these since name resolution happens after macro expansion, // so this is likely "good enough" (so long as people don't have their own types called // `Option` or `Vec` that take one generic parameter they're looking to parse). let last_segment = path.path.segments.last()?; if !wrapper_names.iter().any(|name| last_segment.ident == *name) { return None; } if let syn::PathArguments::AngleBracketed(gen_args) = &last_segment.arguments { let generic_arg = gen_args.args.first()?; if let syn::GenericArgument::Type(ty) = &generic_arg { return Some(ty); } } } None }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/src/main.rs
venndb-usage/src/main.rs
#![allow(dead_code)] use venndb::{Any, VennDB}; #[derive(Debug, VennDB)] #[venndb(validator = employee_validator)] pub struct Employee { #[venndb(key)] id: u32, #[venndb(key)] name: String, is_manager: bool, is_admin: bool, is_active: bool, #[venndb(filter, any)] department: Department, } fn employee_validator(employee: &Employee) -> bool { employee.id > 0 } #[derive(Debug)] pub struct L1Engineer { id: u32, name: String, } impl From<L1Engineer> for Employee { fn from(engineer: L1Engineer) -> Employee { Employee { id: engineer.id, name: engineer.name, is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, } } } #[derive(Debug, PartialEq, Eq, Hash, Clone)] pub enum Department { Any, Engineering, Sales, Marketing, HR, } impl Any for Department { fn is_any(&self) -> bool { self == &Department::Any } } fn main() { let employee = Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }; println!("employee: {:#?}", employee); let _db = EmployeeDB::new(); } #[cfg(test)] mod tests_v0_1 { use super::*; #[test] fn test_employee_db_empty() { let db = EmployeeDB::new(); assert_eq!(db.len(), 0); assert_eq!(db.capacity(), 0); } #[test] fn test_employee_db_append() { let mut db = EmployeeDB::default(); assert_eq!(db.len(), 0); assert_eq!(db.capacity(), 0); let employee = Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }; db.append(employee).unwrap(); assert_eq!(db.len(), 1); assert!(db.get_by_id(&0).is_none()); let employee: &Employee = db.get_by_id(&1).unwrap(); assert_eq!(employee.id, 1); assert_eq!(employee.name, "Alice"); let employee: &Employee = db.get_by_name("Alice").unwrap(); assert_eq!(employee.id, 1); assert_eq!(employee.name, "Alice"); } #[test] fn test_append_into() { let mut db = EmployeeDB::default(); db.append(L1Engineer { id: 1, name: "Alice".to_string(), }) 
.unwrap(); assert_eq!(db.len(), 1); let employee: &Employee = db.get_by_id(&1).unwrap(); assert_eq!(employee.id, 1); assert_eq!(employee.name, "Alice"); assert!(!employee.is_manager); assert!(!employee.is_admin); assert!(employee.is_active); assert_eq!(employee.department, Department::Engineering); } #[test] fn test_extend() { let mut db = EmployeeDB::default(); assert_eq!(db.len(), 0); assert!(db.get_by_id(&1).is_none()); assert!(db.get_by_id(&2).is_none()); assert!(db.is_empty()); db.extend(vec![ L1Engineer { id: 1, name: "Alice".to_string(), }, L1Engineer { id: 2, name: "Bob".to_string(), }, ]) .unwrap(); assert_eq!(db.len(), 2); let employee: &Employee = db.get_by_id(&1).unwrap(); assert_eq!(employee.id, 1); assert_eq!(employee.name, "Alice"); let employee: &Employee = db.get_by_id(&2).unwrap(); assert_eq!(employee.id, 2); assert_eq!(employee.name, "Bob"); } #[test] fn test_extend_duplicate_key() { let mut db = EmployeeDB::default(); db.extend(vec![ L1Engineer { id: 1, name: "Alice".to_string(), }, L1Engineer { id: 2, name: "Bob".to_string(), }, ]) .unwrap(); assert_eq!(db.len(), 2); let err = db .extend(vec![ L1Engineer { id: 2, name: "Charlie".to_string(), }, L1Engineer { id: 3, name: "David".to_string(), }, ]) .unwrap_err(); assert_eq!(EmployeeDBErrorKind::DuplicateKey, err.kind()); let (dup_employee, employee_iter) = err.into_input(); assert_eq!(dup_employee.id, 2); assert_eq!(dup_employee.name, "Charlie"); let employees: Vec<_> = employee_iter.collect(); assert_eq!(employees.len(), 1); assert_eq!(employees[0].id, 3); db.extend(employees).unwrap(); assert_eq!(db.len(), 3); assert_eq!(db.get_by_id(&3).unwrap().name, "David"); } #[test] fn test_employee_query_filters() { let mut db = EmployeeDB::default(); db.append(Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: 
true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 3, name: "Charlie".to_string(), is_manager: true, is_admin: true, is_active: true, department: Department::Sales, }) .unwrap(); let mut query = db.query(); let results: Vec<_> = query .is_manager(true) .is_admin(true) .execute() .unwrap() .iter() .collect(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 3); assert_eq!(query.execute().unwrap().first().id, 3); let mut query = db.query(); assert!(query.is_active(false).execute().is_none()); } #[test] fn test_employee_duplicate_key() { let mut db = EmployeeDB::default(); db.append(Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); // duplicate key: id (=1) let err = db .append(Employee { id: 1, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap_err(); assert_eq!(EmployeeDBErrorKind::DuplicateKey, err.kind()); assert_eq!("Bob", err.into_input().name); } #[test] fn test_duplicate_key_with_zero_pollution() { #[derive(Debug, VennDB)] struct MultiKey { #[venndb(key)] a: String, #[venndb(key)] b: String, #[venndb(key)] c: String, d: bool, e: bool, } let mut db = MultiKeyDB::from_rows(vec![ MultiKey { a: "a".to_string(), b: "b".to_string(), c: "c".to_string(), d: true, e: false, }, MultiKey { a: "A".to_string(), b: "B".to_string(), c: "C".to_string(), d: false, e: true, }, ]) .unwrap(); let err = db .append(MultiKey { a: "foo".to_string(), b: "bar".to_string(), c: "c".to_string(), d: false, e: true, }) .unwrap_err(); assert_eq!(MultiKeyDBErrorKind::DuplicateKey, err.kind()); // ensure there was no pollution, // this will panic in ase there was assert!(db.get_by_a("foo").is_none()); assert!(db.get_by_b("bar").is_none()); } #[test] fn test_into_from_rows() { let rows = vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: 
true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }, ]; let db = EmployeeDB::from_rows(rows).unwrap(); assert_eq!(db.len(), 2); assert_eq!(db.capacity(), 2); let mut query = db.query(); query.is_manager(true); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 1); let rows = db.into_rows(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].id, 1); assert_eq!(rows[1].id, 2); } #[test] fn test_from_rows_duplicate_key() { let err = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 1, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }, ]) .unwrap_err(); assert_eq!(EmployeeDBErrorKind::DuplicateKey, err.kind()); let employees = err.into_input(); assert_eq!(employees.len(), 2); assert_eq!(employees[0].name, "Alice"); assert_eq!(employees[1].name, "Bob"); } #[test] fn test_from_iter() { let db = EmployeeDB::from_iter([ L1Engineer { id: 1, name: "Alice".to_string(), }, L1Engineer { id: 2, name: "Bob".to_string(), }, ]) .unwrap(); assert_eq!(db.len(), 2); assert_eq!(db.capacity(), 2); let mut query = db.query(); query.is_manager(true); assert!(query.execute().is_none()); query .reset() .is_manager(false) .department(Department::Engineering); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 2); } #[test] fn test_from_iter_duplicate_key() { let err = EmployeeDB::from_iter([ L1Engineer { id: 1, name: "Alice".to_string(), }, L1Engineer { id: 1, name: "Bob".to_string(), }, ]) .unwrap_err(); assert_eq!(EmployeeDBErrorKind::DuplicateKey, err.kind()); let employees = err.into_input(); 
assert_eq!(employees.len(), 2); assert_eq!(employees[0].name, "Alice"); assert_eq!(employees[1].name, "Bob"); } #[test] fn test_query_reset() { let mut db = EmployeeDB::default(); db.append(Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 3, name: "Charlie".to_string(), is_manager: true, is_admin: true, is_active: true, department: Department::Sales, }) .unwrap(); let mut query = db.query(); query.is_manager(true); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 3); query.reset(); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 3); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 2); assert_eq!(results[2].id, 3); } #[test] fn test_query_result_any() { let mut db = EmployeeDB::default(); db.append(Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }) .unwrap(); db.append(Employee { id: 3, name: "Charlie".to_string(), is_manager: true, is_admin: true, is_active: true, department: Department::Sales, }) .unwrap(); let mut query = db.query(); query.is_active(true); let result = query.execute().unwrap().any(); assert!(result.id == 1 || result.id == 2 || result.id == 3); } #[test] fn test_db_without_keys() { #[derive(Debug, VennDB)] struct NoKeys { name: String, a: bool, b: bool, } let mut db = NoKeysDB::from_rows(vec![ NoKeys { name: "Alice".to_string(), a: true, b: false, }, NoKeys { name: "Bob".to_string(), 
a: false, b: true, }, ]); assert_eq!(db.len(), 2); assert_eq!(db.capacity(), 2); let mut query = db.query(); query.a(true); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 1); assert_eq!(results[0].name, "Alice"); db.append(NoKeys { name: "Charlie".to_string(), a: true, b: true, }); let mut query = db.query(); query.b(true); let results: Vec<_> = query.execute().unwrap().iter().collect(); assert_eq!(results.len(), 2); assert_eq!(results[0].name, "Bob"); assert_eq!(results[1].name, "Charlie"); } #[test] fn test_db_iter() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }, ]) .unwrap(); let mut iter = db.iter(); assert_eq!(iter.next().unwrap().id, 1); assert_eq!(iter.next().unwrap().id, 2); assert!(iter.next().is_none()); } #[test] fn test_db_result_filter() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }, ]) .unwrap(); let mut query = db.query(); query.is_active(true); let results = query.execute().unwrap(); let rows = results.iter().collect::<Vec<_>>(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].id, 1); assert_eq!(rows[1].id, 2); let results = results .filter(|r| r.department == Department::Engineering) .unwrap(); let rows = results.iter().collect::<Vec<_>>(); assert_eq!(rows.len(), 2); assert_eq!(rows[0].id, 1); assert_eq!(rows[1].id, 2); let results = results.filter(|r| r.is_manager).unwrap(); let rows = results.iter().collect::<Vec<_>>(); assert_eq!(rows.len(), 1); assert_eq!(rows[0].id, 1); 
assert!(results.filter(|r| r.is_admin).is_none()); } #[test] fn test_db_filter_map() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 3, name: "Charlie".to_string(), is_manager: true, is_admin: true, is_active: true, department: Department::Sales, }, Employee { id: 4, name: "David".to_string(), is_manager: false, is_admin: true, is_active: true, department: Department::HR, }, Employee { id: 5, name: "Eve".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::HR, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Marketing); assert!(query.execute().is_none()); query.reset().department(Department::Engineering); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 2); query.reset().department(Department::HR); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 4); assert_eq!(results[1].id, 5); query.reset().department(Department::Sales); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 3); // all the filters let results = query .reset() .department(Department::Engineering) .is_manager(true) .is_admin(false) .is_active(true) .execute() .unwrap() .iter() .collect::<Vec<_>>(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 1); } } #[cfg(test)] mod tests_v0_2 { use super::*; #[derive(Debug, VennDB)] pub struct Worker { #[venndb(key)] id: u32, is_admin: bool, is_active: Option<bool>, #[venndb(filter)] department: Option<Department>, } #[test] fn test_optional_bool_filter() { let db = 
WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: None, }, ]) .unwrap(); let mut query = db.query(); query.is_active(true); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 1); } #[test] fn test_optional_map_filter() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: None, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Engineering); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 1); } #[test] fn test_db_with_optional_properties_default_filter() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: None, }, ]) .unwrap(); let query = db.query(); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 2); } #[test] fn test_optional_map_filter_specific() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: None, department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: Some(Department::HR), }, Worker { id: 3, is_admin: false, is_active: None, department: None, }, Worker { id: 4, is_admin: false, is_active: None, department: Some(Department::Engineering), }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Engineering); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 4); 
} } #[cfg(test)] mod tests_v0_2_1 { use super::*; #[derive(Debug, VennDB)] pub struct Worker { #[venndb(key)] id: u32, is_admin: bool, is_active: Option<bool>, #[venndb(filter, any)] department: Option<Department>, } // these two tests are no longer correct since // the fix introduced in issue https://github.com/plabayo/venndb/issues/7 // // this is intended. As such these issues have moved to `::tests_v0_4`. // Check out the above issue if you want to find the motivation why. // #[test] // fn test_any_filter_map() { // let db = EmployeeDB::from_rows(vec![ // Employee { // id: 1, // name: "Alice".to_string(), // is_manager: true, // is_admin: false, // is_active: true, // department: Department::Engineering, // }, // Employee { // id: 2, // name: "Bob".to_string(), // is_manager: false, // is_admin: false, // is_active: true, // department: Department::HR, // }, // ]) // .unwrap(); // let mut query = db.query(); // query.department(Department::Any); // let results = query.execute().unwrap().iter().collect::<Vec<_>>(); // assert_eq!(results.len(), 2); // assert_eq!(results[0].id, 1); // assert_eq!(results[1].id, 2); // } // #[test] // fn test_any_option_filter_map() { // let db = WorkerDB::from_rows(vec![ // Worker { // id: 1, // is_admin: false, // is_active: Some(true), // department: Some(Department::Engineering), // }, // Worker { // id: 2, // is_admin: false, // is_active: Some(true), // department: Some(Department::HR), // }, // Worker { // id: 3, // is_admin: false, // is_active: None, // department: None, // }, // ]) // .unwrap(); // let mut query = db.query(); // query.department(Department::Any); // let results = query.execute().unwrap().iter().collect::<Vec<_>>(); // assert_eq!(results.len(), 3); // assert_eq!(results[0].id, 1); // assert_eq!(results[1].id, 2); // assert_eq!(results[2].id, 3); // } #[test] fn test_any_row_filter_map() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, 
is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Any, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Engineering); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 2); } #[test] fn test_any_row_optional_filter_map() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: None, }, Worker { id: 3, is_admin: false, is_active: Some(true), department: Some(Department::Any), }, Worker { id: 4, is_admin: false, is_active: Some(true), department: Some(Department::HR), }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Engineering); let results = query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 2); assert_eq!(results[0].id, 1); assert_eq!(results[1].id, 3); } } #[cfg(test)] mod tests_v0_3_0 { use super::*; #[derive(Debug, VennDB)] pub struct Worker { #[venndb(key)] id: u32, is_admin: bool, is_active: Option<bool>, #[venndb(filter, any)] department: Option<Department>, } // regression test: <https://github.com/plabayo/venndb/issues/5> #[test] fn test_any_row_optional_filter_map_white_rabbit() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: None, department: None, }, Worker { id: 3, is_admin: false, is_active: Some(true), department: Some(Department::Any), }, Worker { id: 4, is_admin: false, is_active: Some(true), department: Some(Department::HR), }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Marketing); let results = 
query.execute().unwrap().iter().collect::<Vec<_>>(); assert_eq!(results.len(), 1); assert_eq!(results[0].id, 3); } } #[cfg(test)] mod tests_v0_4 { use super::*; #[derive(Debug, VennDB)] #[venndb(validator = worker_validator)] pub struct Worker { #[venndb(key)] id: u32, is_admin: bool, is_active: Option<bool>, #[venndb(filter, any)] department: Option<Department>, } fn worker_validator(worker: &Worker) -> bool { worker.id > 0 && (worker.is_active.unwrap_or_default() || !worker.is_admin) } #[test] fn test_any_filter_map() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::HR, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Any); // no row matches the filter, // given all rows have an explicit department value assert!(query.execute().is_none()); } #[test] fn test_any_filter_map_match() { let db = EmployeeDB::from_rows(vec![ Employee { id: 1, name: "Alice".to_string(), is_manager: true, is_admin: false, is_active: true, department: Department::Engineering, }, Employee { id: 2, name: "Bob".to_string(), is_manager: false, is_admin: false, is_active: true, department: Department::Any, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Any); let employee = query.execute().unwrap().any(); assert_eq!(employee.id, 2); } #[test] fn test_any_option_filter_map() { let db = WorkerDB::from_rows(vec![ Worker { id: 1, is_admin: false, is_active: Some(true), department: Some(Department::Engineering), }, Worker { id: 2, is_admin: false, is_active: Some(true), department: Some(Department::HR), }, Worker { id: 3, is_admin: false, is_active: None, department: None, }, ]) .unwrap(); let mut query = db.query(); query.department(Department::Any); // no row matches the filter,
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
true
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compilation_tests.rs
venndb-usage/tests/compilation_tests.rs
#[test] fn should_compile() { let t = trybuild::TestCases::new(); t.pass("tests/compiles/*.rs"); } #[test] fn should_not_compile() { let t = trybuild::TestCases::new(); t.compile_fail("tests/fails/*.rs"); }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/any_bool.rs
venndb-usage/tests/fails/any_bool.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(any)] is_alive: bool, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/lonely_any.rs
venndb-usage/tests/fails/lonely_any.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(any)] country: String, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/unknown_attr_struct.rs
venndb-usage/tests/fails/unknown_attr_struct.rs
use venndb::VennDB; #[derive(Debug, VennDB)] #[venndb(foo)] struct Employee { id: u32, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/filter_skipped_field.rs
venndb-usage/tests/fails/filter_skipped_field.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { id: u32, is_manager: bool, #[venndb(skip)] is_active: bool, } fn main() { let mut db = EmployeeDB::new(); db.append(Employee { id: 1, is_manager: true, is_active: true, }); let mut query = db.query(); query.is_active(true); assert!(query.execute().is_some()); }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/any_filter_bool.rs
venndb-usage/tests/fails/any_filter_bool.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(any, filter)] is_alive: bool, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/any_key.rs
venndb-usage/tests/fails/any_key.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(any, key)] country: String, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/derive_enum.rs
venndb-usage/tests/fails/derive_enum.rs
use venndb::VennDB; #[derive(VennDB)] enum MyEnum { A, B, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/key_and_filter.rs
venndb-usage/tests/fails/key_and_filter.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { id: u32, is_manager: bool, is_active: bool, #[venndb(key, filter)] country: String, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/derive_struct_with_validator_str.rs
venndb-usage/tests/fails/derive_struct_with_validator_str.rs
use venndb::VennDB; #[derive(Debug, VennDB)] #[venndb(validator = "employee_validator")] struct Employee { pub id: u32, pub name: String, pub is_manager: bool, pub is_admin: bool, pub is_active: bool, pub department: Department, } #[derive(Debug)] pub enum Department { Engineering, Sales, Marketing, HR, } fn employee_validator(employee: &Employee) -> bool { employee.id > 0 && !employee.name.is_empty() } fn main() { let _ = EmployeeDB::new(); }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/derive_tuple_struct.rs
venndb-usage/tests/fails/derive_tuple_struct.rs
use venndb::VennDB; #[derive(VennDB)] struct MyStruct(u32); fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/filter_any_bool.rs
venndb-usage/tests/fails/filter_any_bool.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(filter, any)] is_alive: bool, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/key_any.rs
venndb-usage/tests/fails/key_any.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(key, any)] country: String, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/unknown_attr_field.rs
venndb-usage/tests/fails/unknown_attr_field.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(foo)] id: u32, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/option_key.rs
venndb-usage/tests/fails/option_key.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(key)] id: Option<u32>, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/fails/filter_and_key.rs
venndb-usage/tests/fails/filter_and_key.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { id: u32, is_manager: bool, is_active: bool, #[venndb(filter, key)] country: String, } fn main() {}
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false
plabayo/venndb
https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_with_explicit_filters.rs
venndb-usage/tests/compiles/derive_struct_with_explicit_filters.rs
use venndb::VennDB; #[derive(Debug, VennDB)] struct Employee { #[venndb(key)] id: u32, name: String, #[venndb(filter)] is_manager: bool, #[venndb(filter)] is_admin: bool, #[venndb(filter)] is_active: bool, department: Department, } #[derive(Debug)] pub enum Department { Engineering, Sales, Marketing, HR, } fn main() { let _ = EmployeeDB::new(); }
rust
Apache-2.0
68c929e6a7f97f74b0b1c84facfa5b1b61369be3
2026-01-04T20:24:17.564367Z
false