repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/cache_filter_engine.rs | src/cache/cache_filter_engine.rs | use crate::FirestoreQueryFilter;
use crate::*;
/// Evaluates a [`FirestoreQueryFilter`] against cached documents.
///
/// Borrows the filter for the engine's lifetime; construct via [`FirestoreCacheFilterEngine::new`].
pub struct FirestoreCacheFilterEngine<'a> {
    // Root filter expression evaluated by `matches_doc`.
    filter: &'a FirestoreQueryFilter,
}
impl<'a> FirestoreCacheFilterEngine<'a> {
pub fn new(filter: &'a FirestoreQueryFilter) -> Self {
Self { filter }
}
pub fn matches_doc(&'a self, doc: &FirestoreDocument) -> bool {
Self::matches_doc_filter(doc, self.filter)
}
pub fn matches_doc_filter(doc: &FirestoreDocument, filter: &FirestoreQueryFilter) -> bool {
match filter {
FirestoreQueryFilter::Composite(composite_filter) => match composite_filter.operator {
FirestoreQueryFilterCompositeOperator::And => composite_filter
.for_all_filters
.iter()
.all(|filter| Self::matches_doc_filter(doc, filter)),
FirestoreQueryFilterCompositeOperator::Or => composite_filter
.for_all_filters
.iter()
.any(|filter| Self::matches_doc_filter(doc, filter)),
},
FirestoreQueryFilter::Unary(unary_filter) => {
Self::matches_doc_filter_unary(doc, unary_filter)
}
FirestoreQueryFilter::Compare(compare_filter) => {
Self::matches_doc_filter_compare(doc, compare_filter)
}
}
}
pub fn matches_doc_filter_unary(
doc: &FirestoreDocument,
filter: &FirestoreQueryFilterUnary,
) -> bool {
match filter {
FirestoreQueryFilterUnary::IsNan(field_path) => {
firestore_doc_get_field_by_path(doc, field_path)
.map(|field_value| match field_value {
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(
double_value,
) => double_value.is_nan(),
_ => false,
})
.unwrap_or(false)
}
FirestoreQueryFilterUnary::IsNotNan(field_path) => {
firestore_doc_get_field_by_path(doc, field_path)
.map(|field_value| match field_value {
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(
double_value,
) => !double_value.is_nan(),
_ => true,
})
.unwrap_or(true)
}
FirestoreQueryFilterUnary::IsNull(field_path) => {
firestore_doc_get_field_by_path(doc, field_path)
.map(|field_value| {
matches!(
field_value,
gcloud_sdk::google::firestore::v1::value::ValueType::NullValue(_)
)
})
.unwrap_or(true)
}
FirestoreQueryFilterUnary::IsNotNull(field_path) => {
firestore_doc_get_field_by_path(doc, field_path)
.map(|field_value| {
!matches!(
field_value,
gcloud_sdk::google::firestore::v1::value::ValueType::NullValue(_)
)
})
.unwrap_or(false)
}
}
}
pub fn matches_doc_filter_compare(
doc: &FirestoreDocument,
filter: &Option<FirestoreQueryFilterCompare>,
) -> bool {
match filter {
Some(FirestoreQueryFilterCompare::Equal(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(CompareOp::Equal, field_value, compare_with_value)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::LessThan(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(CompareOp::LessThan, field_value, compare_with_value)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::LessThanOrEqual(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(
CompareOp::LessThanOrEqual,
field_value,
compare_with_value,
)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::GreaterThan(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(
CompareOp::GreaterThan,
field_value,
compare_with_value,
)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::GreaterThanOrEqual(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(
CompareOp::GreaterThanOrEqual,
field_value,
compare_with_value,
)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::NotEqual(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(CompareOp::NotEqual, field_value, compare_with_value)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::ArrayContains(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(
CompareOp::ArrayContains,
field_value,
compare_with_value,
)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::In(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(CompareOp::In, field_value, compare_with_value)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::ArrayContainsAny(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(
CompareOp::ArrayContainsAny,
field_value,
compare_with_value,
)
})
})
.unwrap_or(false)
}
Some(FirestoreQueryFilterCompare::NotIn(field_path, compare_with)) => {
firestore_doc_get_field_by_path(doc, field_path)
.and_then(|field_value| {
compare_with
.value
.value_type
.as_ref()
.map(|compare_with_value| {
compare_values(CompareOp::NotIn, field_value, compare_with_value)
})
})
.unwrap_or(false)
}
None => true,
}
}
}
/// Comparison operators supported by the cache-side filter evaluation.
///
/// Mirrors the Firestore query comparison operators; consumed by
/// [`compare_values`].
pub(super) enum CompareOp {
    Equal,
    NotEqual,
    LessThan,
    LessThanOrEqual,
    GreaterThan,
    GreaterThanOrEqual,
    // Array membership operators.
    ArrayContains,
    ArrayContainsAny,
    In,
    NotIn,
}
pub(super) fn compare_values(
op: CompareOp,
a: &gcloud_sdk::google::firestore::v1::value::ValueType,
b: &gcloud_sdk::google::firestore::v1::value::ValueType,
) -> bool {
match (op, a, b) {
// handle BooleanValue
(
CompareOp::Equal,
gcloud_sdk::google::firestore::v1::value::ValueType::BooleanValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::BooleanValue(b_val),
) => a_val == b_val,
(
CompareOp::NotEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::BooleanValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::BooleanValue(b_val),
) => a_val != b_val,
// handle IntegerValue
(
CompareOp::Equal,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val == b_val,
(
CompareOp::NotEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val != b_val,
(
CompareOp::LessThan,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val < b_val,
(
CompareOp::LessThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val <= b_val,
(
CompareOp::GreaterThan,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val > b_val,
(
CompareOp::GreaterThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::IntegerValue(b_val),
) => a_val >= b_val,
// For DoubleValue
(
CompareOp::Equal,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val == b_val,
(
CompareOp::NotEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val != b_val,
(
CompareOp::LessThan,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val < b_val,
(
CompareOp::LessThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val <= b_val,
(
CompareOp::GreaterThan,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val > b_val,
(
CompareOp::GreaterThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(b_val),
) => a_val >= b_val,
// For TimestampValue, assumes it's a numerical timestamp; if it's a string or date type, adjust accordingly
(
CompareOp::Equal,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val == b_val,
(
CompareOp::NotEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val != b_val,
(
CompareOp::LessThan,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val.seconds < b_val.seconds && a_val.nanos < b_val.nanos,
(
CompareOp::LessThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val.seconds <= b_val.seconds && a_val.nanos <= b_val.nanos,
(
CompareOp::GreaterThan,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val.seconds > b_val.seconds && a_val.nanos > b_val.nanos,
(
CompareOp::GreaterThanOrEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::TimestampValue(b_val),
) => a_val.seconds >= b_val.seconds && a_val.nanos >= b_val.nanos,
// For StringType only Equal, NotEqual operations make sense in a general context
(
CompareOp::Equal,
gcloud_sdk::google::firestore::v1::value::ValueType::StringValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::StringValue(b_val),
) => a_val == b_val,
(
CompareOp::NotEqual,
gcloud_sdk::google::firestore::v1::value::ValueType::StringValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::StringValue(b_val),
) => a_val != b_val,
// Array Operation
(
CompareOp::ArrayContains,
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(b_val),
) => a_val
.values
.iter()
.flat_map(|v| &v.value_type)
.any(|a_val| {
b_val
.values
.iter()
.flat_map(|v| &v.value_type)
.all(|b_val| compare_values(CompareOp::Equal, a_val, b_val))
}),
(
CompareOp::ArrayContainsAny,
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(a_val),
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(b_val),
) => a_val
.values
.iter()
.flat_map(|v| &v.value_type)
.any(|a_val| {
b_val
.values
.iter()
.flat_map(|v| &v.value_type)
.any(|b_val| compare_values(CompareOp::Equal, a_val, b_val))
}),
(
CompareOp::In,
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(a_val),
b_val,
) => a_val
.values
.iter()
.flat_map(|v| &v.value_type)
.any(|a_val| compare_values(CompareOp::Equal, a_val, b_val)),
(
CompareOp::NotIn,
gcloud_sdk::google::firestore::v1::value::ValueType::ArrayValue(a_val),
b_val,
) => a_val
.values
.iter()
.flat_map(|v| &v.value_type)
.any(|a_val| !compare_values(CompareOp::Equal, a_val, b_val)),
// Any other combinations result in false
_ => false,
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/backends/persistent_backend.rs | src/cache/backends/persistent_backend.rs | use crate::errors::*;
use crate::*;
use async_trait::async_trait;
use futures::stream::BoxStream;
use crate::cache::cache_query_engine::FirestoreCacheQueryEngine;
use chrono::Utc;
use futures::StreamExt;
use gcloud_sdk::google::firestore::v1::Document;
use gcloud_sdk::prost::Message;
use redb::*;
use std::collections::HashMap;
use std::path::PathBuf;
use tracing::*;
/// A persistent (on-disk) cache backend built on the `redb` embedded database.
///
/// Each cached collection is stored in its own redb table keyed by document id,
/// with values holding protobuf-encoded Firestore documents.
pub struct FirestorePersistentCacheBackend {
    /// Describes which collections are cached and how they are preloaded.
    pub config: FirestoreCacheConfiguration,
    // Handle to the underlying redb database file.
    redb: Database,
}
impl FirestorePersistentCacheBackend {
    /// Creates a backend storing its redb file under the system temp directory
    /// (`<tmp>/firestore_cache/persistent/redb`), creating the directory if needed.
    pub fn new(config: FirestoreCacheConfiguration) -> FirestoreResult<Self> {
        let temp_dir = std::env::temp_dir();
        let firestore_cache_dir = temp_dir.join("firestore_cache");
        let db_dir = firestore_cache_dir.join("persistent");
        if !db_dir.exists() {
            debug!(
                directory = %db_dir.display(),
                "Creating a temp directory to store persistent cache.",
            );
            std::fs::create_dir_all(&db_dir)?;
        } else {
            debug!(
                directory = %db_dir.display(),
                "Using a temp directory to store persistent cache.",
            );
        }
        Self::with_options(config, db_dir.join("redb"))
    }

    /// Creates/opens the redb database at `data_file_path` and compacts it on startup.
    pub fn with_options(
        config: FirestoreCacheConfiguration,
        data_file_path: PathBuf,
    ) -> FirestoreResult<Self> {
        if data_file_path.exists() {
            debug!(?data_file_path, "Opening database for persistent cache...",);
        } else {
            debug!(?data_file_path, "Creating database for persistent cache...",);
        }
        // `compact` needs a mutable handle; it reclaims free space left by
        // previous runs before the database handle is stored.
        let mut db = Database::create(data_file_path)?;
        db.compact()?;
        info!("Successfully opened database for persistent cache.");
        Ok(Self { config, redb: db })
    }

    /// Preloads the configured collections from Firestore into their redb
    /// tables, honoring each collection's `collection_load_mode`.
    async fn preload_collections(&self, db: &FirestoreDb) -> Result<(), FirestoreError> {
        for (collection_path, config) in &self.config.collections {
            let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path.as_str());
            match config.collection_load_mode {
                FirestoreCacheCollectionLoadMode::PreloadAllDocs
                | FirestoreCacheCollectionLoadMode::PreloadAllIfEmpty => {
                    // Count existing entries (0 when the table does not exist yet).
                    let existing_records = {
                        let read_tx = self.redb.begin_read()?;
                        if read_tx
                            .list_tables()?
                            .any(|t| t.name() == collection_path.as_str())
                        {
                            read_tx.open_table(td)?.len()?
                        } else {
                            0
                        }
                    };
                    // PreloadAllIfEmpty: skip preloading when data already exists.
                    if matches!(
                        config.collection_load_mode,
                        FirestoreCacheCollectionLoadMode::PreloadAllIfEmpty
                    ) && existing_records > 0
                    {
                        info!(
                            collection_path = collection_path.as_str(),
                            entries_loaded = existing_records,
                            "Preloading collection has been skipped.",
                        );
                        continue;
                    }
                    debug!(
                        collection_path = collection_path.as_str(),
                        "Preloading collection."
                    );
                    let params = if let Some(parent) = &config.parent {
                        db.fluent()
                            .select()
                            .from(config.collection_name.as_str())
                            .parent(parent)
                    } else {
                        db.fluent().select().from(config.collection_name.as_str())
                    };
                    let stream = params.stream_query().await?;
                    // Stream all documents, logging progress every 5000 entries,
                    // and write them in chunks of 100 per redb transaction.
                    stream
                        .enumerate()
                        .map(|(index, docs)| {
                            if index > 0 && index % 5000 == 0 {
                                debug!(
                                    collection_path = collection_path.as_str(),
                                    entries_loaded = index,
                                    "Collection preload in progress...",
                                );
                            }
                            docs
                        })
                        .ready_chunks(100)
                        .for_each(|docs| async move {
                            if let Err(err) = self.write_batch_docs(collection_path, docs) {
                                error!(?err, "Error while preloading collection.");
                            }
                        })
                        .await;
                    // Re-read the table length only when a preload actually ran.
                    let updated_records = if matches!(
                        config.collection_load_mode,
                        FirestoreCacheCollectionLoadMode::PreloadAllDocs
                    ) || existing_records == 0
                    {
                        let read_tx = self.redb.begin_read()?;
                        let table = read_tx.open_table(td)?;
                        table.len()?
                    } else {
                        existing_records
                    };
                    info!(
                        collection_path = collection_path.as_str(),
                        updated_records, "Preloading collection has been finished.",
                    );
                }
                FirestoreCacheCollectionLoadMode::PreloadNone => {
                    // Ensure the table exists even when nothing is preloaded.
                    let tx = self.redb.begin_write()?;
                    debug!(collection_path, "Creating corresponding collection table.",);
                    tx.open_table(td)?;
                    tx.commit()?;
                }
            }
        }
        Ok(())
    }

    /// Writes a batch of documents into the collection's table in one transaction.
    fn write_batch_docs(&self, collection_path: &str, docs: Vec<Document>) -> FirestoreResult<()> {
        let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
        let write_txn = self.redb.begin_write()?;
        {
            // Scope the table borrow so the transaction can be committed below.
            let mut table = write_txn.open_table(td)?;
            for doc in docs {
                let (_, document_id) = split_document_path(&doc.name);
                let doc_bytes = Self::document_to_buf(&doc)?;
                table.insert(document_id, doc_bytes.as_slice())?;
            }
        }
        write_txn.commit()?;
        Ok(())
    }

    /// Serializes a document to protobuf bytes for storage.
    fn document_to_buf(doc: &FirestoreDocument) -> FirestoreResult<Vec<u8>> {
        let mut proto_output_buf = Vec::new();
        doc.encode(&mut proto_output_buf)?;
        Ok(proto_output_buf)
    }

    /// Deserializes a document from its stored protobuf bytes.
    fn buf_to_document<B>(buf: B) -> FirestoreResult<FirestoreDocument>
    where
        B: AsRef<[u8]>,
    {
        let doc = FirestoreDocument::decode(buf.as_ref())?;
        Ok(doc)
    }

    /// Upserts a single document into its collection table; documents from
    /// collections that are not configured for caching are silently ignored.
    fn write_document(&self, doc: &Document) -> FirestoreResult<()> {
        let (collection_path, document_id) = split_document_path(&doc.name);
        if self.config.collections.contains_key(collection_path) {
            let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
            let write_txn = self.redb.begin_write()?;
            {
                // Scope the table borrow so the transaction can be committed below.
                let mut table = write_txn.open_table(td)?;
                let doc_bytes = Self::document_to_buf(doc)?;
                table.insert(document_id, doc_bytes.as_slice())?;
            }
            write_txn.commit()?;
            Ok(())
        } else {
            Ok(())
        }
    }

    /// Returns the number of entries in the given collection's table.
    fn table_len(&self, collection_id: &str) -> FirestoreResult<u64> {
        let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_id);
        let read_tx = self.redb.begin_read()?;
        let len = read_tx.open_table(td)?.len()?;
        Ok(len)
    }

    /// Runs a (pre-validated, simple) query against the cached documents.
    ///
    /// redb offers no async streaming, so all matching documents are first
    /// collected into memory and then post-processed (ordering/limits/etc.)
    /// by the query engine.
    async fn query_cached_docs<'b>(
        &self,
        collection_path: &str,
        query_engine: FirestoreCacheQueryEngine,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
        let read_tx = self.redb.begin_read()?;
        let table = read_tx.open_table(td)?;
        let iter = table.iter()?;
        // It seems there is no way to work with streaming for redb, so this is not efficient
        let mut docs: Vec<FirestoreResult<FirestoreDocument>> = Vec::new();
        for record in iter {
            let (_, v) = record?;
            let doc = Self::buf_to_document(v.value())?;
            if query_engine.matches_doc(&doc) {
                docs.push(Ok(doc));
            }
        }
        let filtered_stream = Box::pin(futures::stream::iter(docs));
        let output_stream = query_engine.process_query_stream(filtered_stream).await?;
        Ok(output_stream)
    }
}
#[async_trait]
impl FirestoreCacheBackend for FirestorePersistentCacheBackend {
    /// Preloads collections and returns one listener target per configured
    /// collection.
    ///
    /// Collections whose table is still empty resume listening from "now"
    /// (`ReadTime` captured before preloading, so no update in between is
    /// missed); non-empty tables pass `None` so previously stored resume
    /// state applies.
    async fn load(
        &self,
        _options: &FirestoreCacheOptions,
        db: &FirestoreDb,
    ) -> Result<Vec<FirestoreListenerTargetParams>, FirestoreError> {
        let read_from_time = Utc::now();
        self.preload_collections(db).await?;
        Ok(self
            .config
            .collections
            .iter()
            .map(|(collection_path, collection_config)| {
                // Treat a failed length read as an empty table.
                let collection_table_len = self.table_len(collection_path).unwrap_or(0);
                let resume_type = if collection_table_len == 0 {
                    Some(FirestoreListenerTargetResumeType::ReadTime(read_from_time))
                } else {
                    None
                };
                FirestoreListenerTargetParams::new(
                    collection_config.listener_target.clone(),
                    FirestoreTargetType::Query(
                        FirestoreQueryParams::new(
                            collection_config.collection_name.as_str().into(),
                        )
                        .opt_parent(collection_config.parent.clone()),
                    ),
                    HashMap::new(),
                )
                .opt_resume_type(resume_type)
            })
            .collect())
    }

    /// Drains every cached collection table (the tables themselves are kept).
    async fn invalidate_all(&self) -> FirestoreResult<()> {
        for collection_path in self.config.collections.keys() {
            let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path.as_str());
            let write_txn = self.redb.begin_write()?;
            {
                debug!(
                    collection_path,
                    "Invalidating collection and draining the corresponding table.",
                );
                let mut table = write_txn.open_table(td)?;
                table.retain(|_, _| false)?;
            }
            write_txn.commit()?;
        }
        Ok(())
    }

    /// Nothing to flush: every write already runs in its own committed transaction.
    async fn shutdown(&self) -> Result<(), FirestoreError> {
        Ok(())
    }

    /// Applies listener events to the cache: document changes are upserted,
    /// deletions removed; all other events are ignored.
    async fn on_listen_event(&self, event: FirestoreListenEvent) -> FirestoreResult<()> {
        match event {
            FirestoreListenEvent::DocumentChange(doc_change) => {
                if let Some(doc) = doc_change.document {
                    trace!(
                        doc_name = ?doc.name,
                        "Writing document to cache due to listener event.",
                    );
                    self.write_document(&doc)?;
                }
                Ok(())
            }
            FirestoreListenEvent::DocumentDelete(doc_deleted) => {
                let (collection_path, document_id) = split_document_path(&doc_deleted.document);
                let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
                let write_txn = self.redb.begin_write()?;
                {
                    // Scope the table borrow so the transaction can be committed below.
                    let mut table = write_txn.open_table(td)?;
                    trace!(
                        deleted_doc = ?doc_deleted.document.as_str(),
                        "Removing document from cache due to listener event.",
                    );
                    table.remove(document_id)?;
                }
                // BUG FIX: the write transaction was previously dropped without
                // `commit()`, which aborts it in redb — deletions were never
                // actually persisted to the cache.
                write_txn.commit()?;
                Ok(())
            }
            _ => Ok(()),
        }
    }
}
#[async_trait]
impl FirestoreCacheDocsByPathSupport for FirestorePersistentCacheBackend {
    /// Looks up a single document by its full path; returns `Ok(None)` when the
    /// collection is not cached or the document is absent.
    async fn get_doc_by_path(
        &self,
        document_path: &str,
    ) -> FirestoreResult<Option<FirestoreDocument>> {
        let (collection_path, document_id) = split_document_path(document_path);
        if self.config.collections.contains_key(collection_path) {
            let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
            let read_tx = self.redb.begin_read()?;
            let table = read_tx.open_table(td)?;
            let value = table.get(document_id)?;
            value.map(|v| Self::buf_to_document(v.value())).transpose()
        } else {
            Ok(None)
        }
    }

    /// Upserts a document into the cache (no-op for non-cached collections).
    async fn update_doc_by_path(&self, document: &FirestoreDocument) -> FirestoreResult<()> {
        self.write_document(document)?;
        Ok(())
    }

    /// Streams every cached document of a collection, or `SkipCache` when the
    /// collection is not configured for caching.
    async fn list_all_docs<'b>(
        &self,
        collection_path: &str,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
    {
        if self.config.collections.contains_key(collection_path) {
            let td: TableDefinition<&str, &[u8]> = TableDefinition::new(collection_path);
            let read_tx = self.redb.begin_read()?;
            let table = read_tx.open_table(td)?;
            let iter = table.iter()?;
            // It seems there is no way to work with streaming for redb, so this is not efficient
            let mut docs: Vec<FirestoreResult<FirestoreDocument>> = Vec::new();
            for record in iter {
                let (_, v) = record?;
                let doc = Self::buf_to_document(v.value())?;
                docs.push(Ok(doc));
            }
            Ok(FirestoreCachedValue::UseCached(Box::pin(
                futures::stream::iter(docs),
            )))
        } else {
            Ok(FirestoreCachedValue::SkipCache)
        }
    }

    /// Answers a query from the cache when the collection is cached and the
    /// query only uses supported (simple) parameters; otherwise instructs the
    /// caller to skip the cache.
    async fn query_docs<'b>(
        &self,
        collection_path: &str,
        query: &FirestoreQueryParams,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
    {
        if self.config.collections.contains_key(collection_path) {
            // For now only basic/simple query all supported
            let simple_query_engine = FirestoreCacheQueryEngine::new(query);
            if simple_query_engine.params_supported() {
                Ok(FirestoreCachedValue::UseCached(
                    self.query_cached_docs(collection_path, simple_query_engine)
                        .await?,
                ))
            } else {
                Ok(FirestoreCachedValue::SkipCache)
            }
        } else {
            Ok(FirestoreCachedValue::SkipCache)
        }
    }
}
impl From<redb::Error> for FirestoreError {
fn from(db_err: redb::Error) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::DatabaseError> for FirestoreError {
fn from(db_err: redb::DatabaseError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbDatabaseError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::TransactionError> for FirestoreError {
fn from(db_err: redb::TransactionError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbTransactionError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::TableError> for FirestoreError {
fn from(db_err: redb::TableError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbTableError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::CommitError> for FirestoreError {
fn from(db_err: redb::CommitError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbCommitError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::StorageError> for FirestoreError {
fn from(db_err: redb::StorageError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbStorageError".into()),
format!("Cache error: {db_err}"),
))
}
}
impl From<redb::CompactionError> for FirestoreError {
fn from(db_err: redb::CompactionError) -> Self {
FirestoreError::CacheError(FirestoreCacheError::new(
FirestoreErrorPublicGenericDetails::new("RedbCompactionError".into()),
format!("Cache error: {db_err}"),
))
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/backends/memory_backend.rs | src/cache/backends/memory_backend.rs | use crate::errors::*;
use crate::*;
use async_trait::async_trait;
use chrono::Utc;
use futures::stream::BoxStream;
use moka::future::{Cache, CacheBuilder};
use crate::cache::cache_query_engine::FirestoreCacheQueryEngine;
use futures::StreamExt;
use std::collections::HashMap;
use tracing::*;
/// In-memory cache of documents for a single collection, keyed by document id.
pub type FirestoreMemCache = Cache<String, FirestoreDocument>;
/// Builder type used to configure per-collection moka caches.
pub type FirestoreMemCacheOptions = CacheBuilder<String, FirestoreDocument, FirestoreMemCache>;
/// An in-memory cache backend built on `moka`, with one cache instance per
/// configured collection.
pub struct FirestoreMemoryCacheBackend {
    /// Describes which collections are cached and how they are preloaded.
    pub config: FirestoreCacheConfiguration,
    // Per-collection caches, keyed by collection path.
    collection_caches: HashMap<String, FirestoreMemCache>,
}
/// Default maximum number of entries per collection cache.
const FIRESTORE_MEMORY_CACHE_DEFAULT_MAX_CAPACITY: u64 = 50000;
impl FirestoreMemoryCacheBackend {
    /// Creates a backend with the default per-collection capacity
    /// (`FIRESTORE_MEMORY_CACHE_DEFAULT_MAX_CAPACITY`).
    pub fn new(config: FirestoreCacheConfiguration) -> FirestoreResult<Self> {
        Self::with_max_capacity(config, FIRESTORE_MEMORY_CACHE_DEFAULT_MAX_CAPACITY)
    }

    /// Creates a backend where every collection cache is capped at
    /// `max_capacity` entries.
    pub fn with_max_capacity(
        config: FirestoreCacheConfiguration,
        max_capacity: u64,
    ) -> FirestoreResult<Self> {
        Self::with_collection_options(config, |_| {
            FirestoreMemCache::builder().max_capacity(max_capacity)
        })
    }

    /// Creates a backend whose per-collection cache builder is supplied by
    /// `collection_mem_options`, allowing different moka settings per
    /// collection path.
    pub fn with_collection_options<FN>(
        config: FirestoreCacheConfiguration,
        collection_mem_options: FN,
    ) -> FirestoreResult<Self>
    where
        FN: Fn(&str) -> FirestoreMemCacheOptions,
    {
        let collection_caches = config
            .collections
            .keys()
            .map(|collection_path| {
                (
                    collection_path.clone(),
                    collection_mem_options(collection_path.as_str()).build(),
                )
            })
            .collect();
        Ok(Self {
            config,
            collection_caches,
        })
    }

    /// Preloads configured collections from Firestore into their in-memory caches.
    ///
    /// NOTE(review): unlike the persistent backend, `PreloadAllIfEmpty` behaves
    /// the same as `PreloadAllDocs` here, since a fresh in-memory cache is
    /// always empty at startup.
    async fn preload_collections(&self, db: &FirestoreDb) -> Result<(), FirestoreError> {
        for (collection_path, config) in &self.config.collections {
            match config.collection_load_mode {
                FirestoreCacheCollectionLoadMode::PreloadAllDocs
                | FirestoreCacheCollectionLoadMode::PreloadAllIfEmpty => {
                    if let Some(mem_cache) = self.collection_caches.get(collection_path.as_str()) {
                        debug!(collection_path, "Preloading collection.");
                        let params = if let Some(parent) = &config.parent {
                            db.fluent()
                                .select()
                                .from(config.collection_name.as_str())
                                .parent(parent)
                        } else {
                            db.fluent().select().from(config.collection_name.as_str())
                        };
                        let stream = params.stream_query().await?;
                        // Insert sequentially (concurrency of 1), logging
                        // progress every 5000 documents.
                        stream
                            .enumerate()
                            .map(|(index, docs)| {
                                if index > 0 && index % 5000 == 0 {
                                    debug!(
                                        collection_path = collection_path.as_str(),
                                        entries_loaded = index,
                                        "Collection preload in progress...",
                                    );
                                }
                                docs
                            })
                            .for_each_concurrent(1, |doc| async move {
                                let (_, document_id) = split_document_path(&doc.name);
                                mem_cache.insert(document_id.to_string(), doc).await;
                            })
                            .await;
                        // Flush moka's pending maintenance so entry_count is accurate.
                        mem_cache.run_pending_tasks().await;
                        info!(
                            collection_path = collection_path.as_str(),
                            entry_count = mem_cache.entry_count(),
                            "Preloading collection has been finished.",
                        );
                    }
                }
                FirestoreCacheCollectionLoadMode::PreloadNone => {}
            }
        }
        Ok(())
    }

    /// Runs a (pre-validated, simple) query against the in-memory cache,
    /// filtering documents eagerly and delegating ordering/limits to the
    /// query engine. An unknown collection yields an empty stream.
    async fn query_cached_docs<'b>(
        &self,
        collection_path: &str,
        query_engine: FirestoreCacheQueryEngine,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        match self.collection_caches.get(collection_path) {
            Some(mem_cache) => {
                let filtered_results: Vec<FirestoreResult<FirestoreDocument>> = mem_cache
                    .iter()
                    .filter(|(_, doc)| query_engine.matches_doc(doc))
                    .map(|(_, doc)| Ok(doc))
                    .collect();
                let filtered_stream = futures::stream::iter(filtered_results);
                let output_stream = query_engine
                    .process_query_stream(Box::pin(filtered_stream))
                    .await?;
                Ok(output_stream)
            }
            None => Ok(Box::pin(futures::stream::empty())),
        }
    }
}
#[async_trait]
impl FirestoreCacheBackend for FirestoreMemoryCacheBackend {
    /// Preloads collections and returns one listener target per configured
    /// collection, resuming reads from a time captured before preloading so
    /// no update in between is missed.
    async fn load(
        &self,
        _options: &FirestoreCacheOptions,
        db: &FirestoreDb,
    ) -> Result<Vec<FirestoreListenerTargetParams>, FirestoreError> {
        let read_from_time = Utc::now();
        self.preload_collections(db).await?;
        Ok(self
            .config
            .collections
            .values()
            .map(|collection_config| {
                FirestoreListenerTargetParams::new(
                    collection_config.listener_target.clone(),
                    FirestoreTargetType::Query(
                        FirestoreQueryParams::new(
                            collection_config.collection_name.as_str().into(),
                        )
                        .opt_parent(collection_config.parent.clone()),
                    ),
                    HashMap::new(),
                )
                .with_resume_type(FirestoreListenerTargetResumeType::ReadTime(read_from_time))
            })
            .collect())
    }

    /// Clears every collection cache.
    async fn invalidate_all(&self) -> FirestoreResult<()> {
        for (collection_path, mem_cache) in &self.collection_caches {
            debug!(collection_path, "Invalidating cache for collection.");
            mem_cache.invalidate_all();
            mem_cache.run_pending_tasks().await;
        }
        Ok(())
    }

    /// No resources to release for the in-memory backend.
    async fn shutdown(&self) -> Result<(), FirestoreError> {
        Ok(())
    }

    /// Applies listener events to the cache: document changes are upserted,
    /// deletions removed; all other events are ignored.
    async fn on_listen_event(&self, event: FirestoreListenEvent) -> FirestoreResult<()> {
        match event {
            FirestoreListenEvent::DocumentChange(doc_change) => {
                if let Some(doc) = doc_change.document {
                    let (collection_path, document_id) = split_document_path(&doc.name);
                    if let Some(mem_cache) = self.collection_caches.get(collection_path) {
                        trace!(
                            doc_name = ?doc.name,
                            "Writing document to cache due to listener event.",
                        );
                        mem_cache.insert(document_id.to_string(), doc).await;
                    }
                }
                Ok(())
            }
            FirestoreListenEvent::DocumentDelete(doc_deleted) => {
                let (collection_path, document_id) = split_document_path(&doc_deleted.document);
                if let Some(mem_cache) = self.collection_caches.get(collection_path) {
                    trace!(
                        deleted_doc = ?doc_deleted.document.as_str(),
                        "Removing document from cache due to listener event.",
                    );
                    mem_cache.remove(document_id).await;
                }
                Ok(())
            }
            _ => Ok(()),
        }
    }
}
#[async_trait]
impl FirestoreCacheDocsByPathSupport for FirestoreMemoryCacheBackend {
    /// Looks up a single document by its full path; `Ok(None)` when the
    /// collection is not cached or the document is absent.
    async fn get_doc_by_path(
        &self,
        document_path: &str,
    ) -> FirestoreResult<Option<FirestoreDocument>> {
        let (collection_path, document_id) = split_document_path(document_path);
        match self.collection_caches.get(collection_path) {
            Some(mem_cache) => Ok(mem_cache.get(document_id).await),
            None => Ok(None),
        }
    }

    /// Upserts a document into its collection cache (no-op for non-cached
    /// collections).
    async fn update_doc_by_path(&self, document: &FirestoreDocument) -> FirestoreResult<()> {
        let (collection_path, document_id) = split_document_path(&document.name);
        match self.collection_caches.get(collection_path) {
            Some(mem_cache) => {
                mem_cache
                    .insert(document_id.to_string(), document.clone())
                    .await;
                Ok(())
            }
            None => Ok(()),
        }
    }

    /// Streams every cached document of a collection, or `SkipCache` when the
    /// collection is not configured for caching.
    async fn list_all_docs<'b>(
        &self,
        collection_path: &str,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
    {
        match self.collection_caches.get(collection_path) {
            Some(mem_cache) => {
                let all_docs: Vec<FirestoreResult<FirestoreDocument>> =
                    mem_cache.iter().map(|(_, doc)| Ok(doc)).collect();
                Ok(FirestoreCachedValue::UseCached(Box::pin(
                    futures::stream::iter(all_docs),
                )))
            }
            None => Ok(FirestoreCachedValue::SkipCache),
        }
    }

    /// Answers a query from the cache when the query only uses supported
    /// (simple) parameters; otherwise instructs the caller to skip the cache.
    ///
    /// NOTE(review): unlike the persistent backend, this does not pre-check
    /// that `collection_path` is a cached collection; in that case
    /// `query_cached_docs` yields an empty stream rather than `SkipCache`.
    async fn query_docs<'b>(
        &self,
        collection_path: &str,
        query: &FirestoreQueryParams,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
    {
        // For now only basic/simple query all supported
        let simple_query_engine = FirestoreCacheQueryEngine::new(query);
        if simple_query_engine.params_supported() {
            Ok(FirestoreCachedValue::UseCached(
                self.query_cached_docs(collection_path, simple_query_engine)
                    .await?,
            ))
        } else {
            Ok(FirestoreCachedValue::SkipCache)
        }
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/backends/mod.rs | src/cache/backends/mod.rs | #[cfg(feature = "caching-memory")]
mod memory_backend;
#[cfg(feature = "caching-memory")]
pub use memory_backend::*;
#[cfg(feature = "caching-persistent")]
mod persistent_backend;
#[cfg(feature = "caching-persistent")]
pub use persistent_backend::*;
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/session_params.rs | src/db/session_params.rs | use crate::FirestoreConsistencySelector;
use rsb_derive::*;
/// Parameters that define the behavior of a Firestore session or a specific set of operations.
///
/// `FirestoreDbSessionParams` allow for configuring aspects like read consistency
/// (e.g., reading data as of a specific time) and caching behavior for operations
/// performed with a [`FirestoreDb`](crate::FirestoreDb) instance that is associated
/// with these parameters.
///
/// These parameters can be applied to a `FirestoreDb` instance using methods like
/// [`FirestoreDb::with_session_params()`](crate::FirestoreDb::with_session_params) or
/// [`FirestoreDb::clone_with_session_params()`](crate::FirestoreDb::clone_with_session_params).
#[derive(Clone, Builder)]
pub struct FirestoreDbSessionParams {
    /// Specifies the consistency guarantee for read operations.
    ///
    /// If `None` (the default), strong consistency is used (i.e., the latest version of data is read).
    /// Can be set to a [`FirestoreConsistencySelector`] to read data at a specific
    /// point in time or within a transaction.
    pub consistency_selector: Option<FirestoreConsistencySelector>,
    /// Defines the caching behavior for this session.
    /// Defaults to [`FirestoreDbSessionCacheMode::None`].
    ///
    /// This field is only effective if the `caching` feature is enabled.
    #[default = "FirestoreDbSessionCacheMode::None"]
    pub cache_mode: FirestoreDbSessionCacheMode,
}
/// Defines the caching mode for Firestore operations within a session.
///
/// This enum is used in [`FirestoreDbSessionParams`] to control how and if
/// caching is utilized for read operations.
#[derive(Clone)]
pub enum FirestoreDbSessionCacheMode {
/// No caching is performed. All read operations go directly to Firestore.
None,
/// Enables read-through caching.
///
/// When a read operation is performed:
/// 1. The cache is checked first.
/// 2. If data is found in the cache, it's returned.
/// 3. If data is not in the cache, it's fetched from Firestore, stored in the cache,
/// and then returned.
///
/// This mode is only available if the `caching` feature is enabled.
#[cfg(feature = "caching")]
ReadThroughCache(FirestoreSharedCacheBackend),
/// Reads exclusively from the cache.
///
/// When a read operation is performed:
/// 1. The cache is checked.
/// 2. If data is found, it's returned.
/// 3. If data is not found, the operation will typically result in a "not found"
/// status without attempting to fetch from Firestore.
///
/// This mode is only available if the `caching` feature is enabled.
#[cfg(feature = "caching")]
ReadCachedOnly(FirestoreSharedCacheBackend),
}
/// A type alias for a thread-safe, shareable Firestore cache backend.
///
/// This is an `Arc` (Atomically Reference Counted) pointer to a trait object
/// that implements [`FirestoreCacheBackend`](crate::FirestoreCacheBackend).
/// It allows multiple parts of the application or different `FirestoreDb` instances
/// to share the same underlying cache storage.
///
/// This type is only available if the `caching` feature is enabled.
#[cfg(feature = "caching")]
pub type FirestoreSharedCacheBackend =
std::sync::Arc<dyn crate::FirestoreCacheBackend + Send + Sync + 'static>;
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/transaction_models.rs | src/db/transaction_models.rs | use crate::errors::FirestoreError;
use crate::{FirestoreConsistencySelector, FirestoreWriteResult};
use chrono::prelude::*;
use chrono::Duration;
use rsb_derive::Builder;
/// Options for configuring a Firestore transaction.
///
/// These options control the behavior of a transaction, such as its mode (read-only or read-write)
/// and consistency requirements for read-only transactions.
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreTransactionOptions {
/// The mode of the transaction (e.g., read-only, read-write).
/// Defaults to [`FirestoreTransactionMode::ReadWrite`].
#[default = "FirestoreTransactionMode::ReadWrite"]
pub mode: FirestoreTransactionMode,
/// An optional maximum duration for the entire transaction, including retries.
/// If set, the transaction will attempt to complete within this duration.
/// If `None`, default retry policies of the underlying gRPC client or Firestore service apply.
pub max_elapsed_time: Option<Duration>,
}
impl Default for FirestoreTransactionOptions {
fn default() -> Self {
Self {
mode: FirestoreTransactionMode::ReadWrite,
max_elapsed_time: None,
}
}
}
impl TryFrom<FirestoreTransactionOptions>
    for gcloud_sdk::google::firestore::v1::TransactionOptions
{
    type Error = FirestoreError;

    /// Maps the crate-level transaction mode onto the gRPC `TransactionOptions`
    /// message. Only [`FirestoreTransactionMode::ReadOnlyWithConsistency`] can
    /// fail, when its consistency selector cannot be converted.
    fn try_from(options: FirestoreTransactionOptions) -> Result<Self, Self::Error> {
        use gcloud_sdk::google::firestore::v1::transaction_options;

        // Build the mode variant first; wrapping into the outer message is
        // identical for every branch.
        let mode = match options.mode {
            FirestoreTransactionMode::ReadOnly => {
                transaction_options::Mode::ReadOnly(transaction_options::ReadOnly {
                    consistency_selector: None,
                })
            }
            FirestoreTransactionMode::ReadOnlyWithConsistency(ref selector) => {
                transaction_options::Mode::ReadOnly(transaction_options::ReadOnly {
                    consistency_selector: Some(selector.try_into()?),
                })
            }
            FirestoreTransactionMode::ReadWrite => {
                transaction_options::Mode::ReadWrite(transaction_options::ReadWrite {
                    retry_transaction: vec![],
                })
            }
            FirestoreTransactionMode::ReadWriteRetry(tid) => {
                transaction_options::Mode::ReadWrite(transaction_options::ReadWrite {
                    retry_transaction: tid,
                })
            }
        };

        Ok(gcloud_sdk::google::firestore::v1::TransactionOptions { mode: Some(mode) })
    }
}
/// Defines the mode of a Firestore transaction.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreTransactionMode {
    /// A read-only transaction.
    ///
    /// In this mode, only read operations are allowed. The transaction will use
    /// strong consistency by default, reading the latest version of data.
    ReadOnly,
    /// A read-write transaction.
    ///
    /// This is the default mode. Both read and write operations are allowed.
    /// Firestore ensures atomicity for all operations within the transaction.
    ReadWrite,
    /// A read-only transaction with a specific consistency requirement.
    ///
    /// Allows specifying how data should be read, for example, at a particular
    /// point in time using [`FirestoreConsistencySelector::ReadTime`].
    ReadOnlyWithConsistency(FirestoreConsistencySelector),
    /// A read-write transaction that attempts to retry a previous transaction.
    ///
    /// This is used internally by the client when retrying a transaction that
    /// failed due to contention or other transient issues. The `FirestoreTransactionId`
    /// is the ID of the transaction to retry.
    ReadWriteRetry(FirestoreTransactionId),
}
/// A type alias for Firestore transaction IDs.
/// Transaction IDs are represented as a vector of bytes.
pub type FirestoreTransactionId = Vec<u8>;
/// Represents the response from committing a Firestore transaction.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreTransactionResponse {
/// A list of results for each write operation performed within the transaction.
/// Each [`FirestoreWriteResult`] provides information about a specific write,
/// such as its update time.
pub write_results: Vec<FirestoreWriteResult>,
/// The time at which the transaction was committed.
/// This is `None` if the transaction was read-only or did not involve writes.
pub commit_time: Option<DateTime<Utc>>,
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/listen_changes.rs | src/db/listen_changes.rs | use crate::db::safe_document_path;
use crate::errors::*;
use crate::timestamp_utils::to_timestamp;
use crate::{FirestoreDb, FirestoreQueryParams, FirestoreResult, FirestoreResumeStateStorage};
pub use async_trait::async_trait;
use chrono::prelude::*;
use futures::stream::BoxStream;
use futures::StreamExt;
use futures::TryFutureExt;
use futures::TryStreamExt;
use gcloud_sdk::google::firestore::v1::*;
use rsb_derive::*;
pub use rvstruct::ValueStruct;
use std::collections::HashMap;
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio::task::JoinHandle;
use tracing::*;
/// Parameters describing a single listen target: what to watch and how to resume.
#[derive(Debug, Clone, Builder)]
pub struct FirestoreListenerTargetParams {
    /// Client-chosen target identifier; must be non-zero and fit into `i32`
    /// (enforced by [`FirestoreListenerTarget::validate`]).
    pub target: FirestoreListenerTarget,
    /// What to listen to: a structured query or a set of documents.
    pub target_type: FirestoreTargetType,
    /// Where to resume from (resume token or read time), if resuming is desired.
    pub resume_type: Option<FirestoreListenerTargetResumeType>,
    /// Maps to the gRPC target `once` flag; treated as `false` when unset.
    pub add_target_once: Option<bool>,
    /// Labels forwarded verbatim with the listen request.
    pub labels: HashMap<String, String>,
}
impl FirestoreListenerTargetParams {
    /// Validates these parameters; currently this only checks the target ID.
    pub fn validate(&self) -> FirestoreResult<()> {
        self.target.validate()
    }
}
#[derive(Debug, Clone, Builder)]
pub struct FirestoreCollectionDocuments {
pub parent: Option<String>,
pub collection: String,
pub documents: Vec<String>,
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone)]
pub enum FirestoreTargetType {
Query(FirestoreQueryParams),
Documents(FirestoreCollectionDocuments),
}
#[derive(Debug, Clone)]
pub enum FirestoreListenerTargetResumeType {
Token(FirestoreListenerToken),
ReadTime(DateTime<Utc>),
}
#[async_trait]
pub trait FirestoreListenSupport {
async fn listen_doc_changes<'a, 'b>(
&'a self,
targets: Vec<FirestoreListenerTargetParams>,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<ListenResponse>>>;
}
#[async_trait]
impl FirestoreListenSupport for FirestoreDb {
async fn listen_doc_changes<'a, 'b>(
&'a self,
targets: Vec<FirestoreListenerTargetParams>,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<ListenResponse>>> {
let listen_requests = targets
.into_iter()
.map(|target_params| self.create_listen_request(target_params))
.collect::<FirestoreResult<Vec<ListenRequest>>>()?;
let request = gcloud_sdk::tonic::Request::new(
futures::stream::iter(listen_requests).chain(futures::stream::pending()),
);
let response = self.client().get().listen(request).await?;
Ok(response.into_inner().map_err(|e| e.into()).boxed())
}
}
#[derive(Clone, Debug, Eq, PartialEq, Hash, ValueStruct)]
pub struct FirestoreListenerTarget(u32);
impl FirestoreListenerTarget {
    /// Ensures the target ID fits the protocol constraints: it must be
    /// non-zero and representable as an `i32` on the wire.
    pub fn validate(&self) -> FirestoreResult<()> {
        let id = *self.value();
        if id == 0 {
            return Err(FirestoreError::InvalidParametersError(
                FirestoreInvalidParametersError::new(FirestoreInvalidParametersPublicDetails::new(
                    "target_id".to_string(),
                    "Listener target ID cannot be zero".to_string(),
                )),
            ));
        }
        if id > i32::MAX as u32 {
            return Err(FirestoreError::InvalidParametersError(
                FirestoreInvalidParametersError::new(FirestoreInvalidParametersPublicDetails::new(
                    "target_id".to_string(),
                    format!(
                        "Listener target ID cannot be more than: {}. {} is specified",
                        i32::MAX,
                        id
                    ),
                )),
            ));
        }
        Ok(())
    }
}
impl TryInto<i32> for FirestoreListenerTarget {
type Error = FirestoreError;
fn try_into(self) -> FirestoreResult<i32> {
self.validate()?;
(*self.value()).try_into().map_err(|e| {
FirestoreError::InvalidParametersError(FirestoreInvalidParametersError::new(
FirestoreInvalidParametersPublicDetails::new(
"target_id".to_string(),
format!("Invalid target ID: {} {}", self.value(), e),
),
))
})
}
}
impl TryFrom<i32> for FirestoreListenerTarget {
type Error = FirestoreError;
fn try_from(value: i32) -> FirestoreResult<Self> {
value
.try_into()
.map_err(|e| {
FirestoreError::InvalidParametersError(FirestoreInvalidParametersError::new(
FirestoreInvalidParametersPublicDetails::new(
"target_id".to_string(),
format!("Invalid target ID: {value} {e}"),
),
))
})
.map(FirestoreListenerTarget)
}
}
#[derive(Clone, Debug, ValueStruct)]
pub struct FirestoreListenerToken(Vec<u8>);
impl FirestoreDb {
pub async fn create_listener<S>(
&self,
storage: S,
) -> FirestoreResult<FirestoreListener<FirestoreDb, S>>
where
S: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
self.create_listener_with_params(storage, FirestoreListenerParams::new())
.await
}
pub async fn create_listener_with_params<S>(
&self,
storage: S,
params: FirestoreListenerParams,
) -> FirestoreResult<FirestoreListener<FirestoreDb, S>>
where
S: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
FirestoreListener::new(self.clone(), storage, params).await
}
fn create_listen_request(
&self,
target_params: FirestoreListenerTargetParams,
) -> FirestoreResult<ListenRequest> {
Ok(ListenRequest {
database: self.get_database_path().to_string(),
labels: target_params.labels,
target_change: Some(listen_request::TargetChange::AddTarget(Target {
target_id: target_params.target.try_into()?,
once: target_params.add_target_once.unwrap_or(false),
target_type: Some(match target_params.target_type {
FirestoreTargetType::Query(query_params) => {
target::TargetType::Query(target::QueryTarget {
parent: query_params
.parent
.as_ref()
.unwrap_or_else(|| self.get_documents_path())
.clone(),
query_type: Some(target::query_target::QueryType::StructuredQuery(
query_params.try_into()?,
)),
})
}
FirestoreTargetType::Documents(collection_documents) => {
target::TargetType::Documents(target::DocumentsTarget {
documents: collection_documents
.documents
.into_iter()
.map(|doc_id| {
safe_document_path(
collection_documents
.parent
.as_deref()
.unwrap_or_else(|| self.get_documents_path()),
collection_documents.collection.as_str(),
doc_id,
)
})
.collect::<FirestoreResult<Vec<String>>>()?,
})
}
}),
resume_type: target_params
.resume_type
.map(|resume_type| match resume_type {
FirestoreListenerTargetResumeType::Token(token) => {
target::ResumeType::ResumeToken(token.into_value())
}
FirestoreListenerTargetResumeType::ReadTime(dt) => {
target::ResumeType::ReadTime(to_timestamp(dt))
}
}),
..Default::default()
})),
})
}
}
/// A single event delivered on a listen stream (alias of the gRPC
/// `listen_response::ResponseType`).
pub type FirestoreListenEvent = listen_response::ResponseType;
/// Tuning parameters for [`FirestoreListener`].
#[derive(Debug, Clone, Eq, PartialEq, Builder)]
pub struct FirestoreListenerParams {
    /// Delay before re-establishing the listen stream after a transient error;
    /// the listener loop falls back to 5 seconds when unset.
    pub retry_delay: Option<std::time::Duration>,
}
/// A long-lived listener that maintains Firestore listen streams in a
/// spawned background task and invokes a user callback for incoming events.
pub struct FirestoreListener<D, S>
where
    D: FirestoreListenSupport,
    S: FirestoreResumeStateStorage,
{
    db: D,
    storage: S,
    listener_params: FirestoreListenerParams,
    targets: Vec<FirestoreListenerTargetParams>,
    // Cooperative stop flag shared with the background loop.
    shutdown_flag: Arc<AtomicBool>,
    // Handle of the spawned listener task, if started.
    shutdown_handle: Option<JoinHandle<()>>,
    // Sender used to wake the loop up during shutdown.
    shutdown_writer: Option<Arc<UnboundedSender<i8>>>,
}
impl<D, S> FirestoreListener<D, S>
where
D: FirestoreListenSupport + Clone + Send + Sync + 'static,
S: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
pub async fn new(
db: D,
storage: S,
listener_params: FirestoreListenerParams,
) -> FirestoreResult<FirestoreListener<D, S>> {
Ok(FirestoreListener {
db,
storage,
listener_params,
targets: vec![],
shutdown_flag: Arc::new(AtomicBool::new(false)),
shutdown_handle: None,
shutdown_writer: None,
})
}
pub fn add_target(
&mut self,
target_params: FirestoreListenerTargetParams,
) -> FirestoreResult<()> {
target_params.validate()?;
self.targets.push(target_params);
Ok(())
}
    /// Spawns the background listening task for all registered targets.
    ///
    /// For each target without an explicit resume point, a previously stored
    /// resume state (if any) is loaded from `storage`. The callback `cb` is
    /// invoked for every received listen event.
    pub async fn start<FN, F>(&mut self, cb: FN) -> FirestoreResult<()>
    where
        FN: Fn(FirestoreListenEvent) -> F + Send + Sync + 'static,
        F: Future<Output = AnyBoxedErrResult<()>> + Send + 'static,
    {
        info!(
            num_targets = self.targets.len(),
            "Starting a Firestore listener for targets...",
        );
        // Resolve the initial resume state for every target, keyed by target ID.
        let mut initial_states: HashMap<FirestoreListenerTarget, FirestoreListenerTargetParams> =
            HashMap::new();
        for target_params in &self.targets {
            match &target_params.resume_type {
                Some(resume_type) => {
                    initial_states.insert(
                        target_params.target.clone(),
                        target_params.clone().with_resume_type(resume_type.clone()),
                    );
                }
                None => {
                    // No explicit resume point: try to restore one from storage.
                    let resume_type = self
                        .storage
                        .read_resume_state(&target_params.target)
                        .map_err(|err| {
                            FirestoreError::SystemError(FirestoreSystemError::new(
                                FirestoreErrorPublicGenericDetails::new("SystemError".into()),
                                format!("Listener init error: {err}"),
                            ))
                        })
                        .await?;
                    initial_states.insert(
                        target_params.target.clone(),
                        target_params.clone().opt_resume_type(resume_type),
                    );
                }
            }
        }
        if initial_states.is_empty() {
            warn!("No initial states for listener targets. Exiting...");
            return Ok(());
        }
        // Channel used by `shutdown()` to wake the background loop.
        let (tx, rx): (UnboundedSender<i8>, UnboundedReceiver<i8>) =
            tokio::sync::mpsc::unbounded_channel();
        self.shutdown_writer = Some(Arc::new(tx));
        self.shutdown_handle = Some(tokio::spawn(Self::listener_loop(
            self.db.clone(),
            self.storage.clone(),
            self.shutdown_flag.clone(),
            initial_states,
            self.listener_params.clone(),
            rx,
            cb,
        )));
        Ok(())
    }
    /// Signals the background listener task to stop and waits for it to exit.
    ///
    /// Shutdown is cooperative: a flag is raised for the loop to observe and a
    /// message is pushed through the shutdown channel to wake up a `select!`
    /// that may be blocked on the gRPC stream.
    pub async fn shutdown(&mut self) -> FirestoreResult<()> {
        debug!("Shutting down Firestore listener...");
        self.shutdown_flag.store(true, Ordering::Relaxed);
        // A send error just means the loop already exited; ignore it.
        if let Some(shutdown_writer) = self.shutdown_writer.take() {
            shutdown_writer.send(1).ok();
        }
        // Join the spawned task; a join error is logged but not propagated.
        if let Some(signaller) = self.shutdown_handle.take() {
            if let Err(err) = signaller.await {
                warn!(%err, "Firestore listener exit error!");
            };
        }
        debug!("Shutting down Firestore listener has been finished...");
        Ok(())
    }
    /// Background task body: (re)establishes the listen stream and dispatches
    /// events until shut down.
    ///
    /// Transient errors restart the stream after `retry_delay` (default 5s);
    /// permanent errors raise the shutdown flag. Resume tokens delivered via
    /// `TargetChange` responses are persisted through `storage` and kept in
    /// `targets_state` so reconnects continue where the last stream stopped.
    async fn listener_loop<FN, F>(
        db: D,
        storage: S,
        shutdown_flag: Arc<AtomicBool>,
        mut targets_state: HashMap<FirestoreListenerTarget, FirestoreListenerTargetParams>,
        listener_params: FirestoreListenerParams,
        mut shutdown_receiver: UnboundedReceiver<i8>,
        cb: FN,
    ) where
        D: FirestoreListenSupport + Clone + Send + Sync,
        FN: Fn(FirestoreListenEvent) -> F + Send + Sync,
        F: Future<Output = AnyBoxedErrResult<()>> + Send,
    {
        let effective_delay = listener_params
            .retry_delay
            .unwrap_or_else(|| std::time::Duration::from_secs(5));
        // Outer loop: one iteration per (re)connected listen stream.
        while !shutdown_flag.load(Ordering::Relaxed) {
            debug!(
                num_targets = targets_state.len(),
                "Start listening on targets..."
            );
            match db
                .listen_doc_changes(targets_state.values().cloned().collect())
                .await
            {
                Err(err) => {
                    if Self::check_listener_if_permanent_error(err, effective_delay).await {
                        shutdown_flag.store(true, Ordering::Relaxed);
                    }
                }
                // Inner loop: consume one established stream until it ends,
                // errors out, or a shutdown is requested.
                Ok(mut listen_stream) => loop {
                    tokio::select! {
                        shutdown_trigger = shutdown_receiver.recv() => {
                            // `None` means the sender was dropped without an explicit shutdown().
                            if shutdown_trigger.is_none() {
                                debug!("Listener dropped. Exiting...");
                                shutdown_flag.store(true, Ordering::Relaxed);
                            }
                            debug!(num_targets = targets_state.len(), "Exiting from listener on targets...");
                            shutdown_receiver.close();
                            break;
                        }
                        tried = listen_stream.try_next() => {
                            if shutdown_flag.load(Ordering::Relaxed) {
                                break;
                            }
                            else {
                                match tried {
                                    Ok(Some(event)) => {
                                        trace!(?event, "Received a listen response event to handle.");
                                        match event.response_type {
                                            // Target change carrying a resume token: persist it and
                                            // remember it for the next reconnect.
                                            Some(listen_response::ResponseType::TargetChange(ref target_change))
                                                if !target_change.resume_token.is_empty() =>
                                            {
                                                for target_id_num in &target_change.target_ids {
                                                    match FirestoreListenerTarget::try_from(*target_id_num) {
                                                        Ok(target_id) => {
                                                            if let Some(target) = targets_state.get_mut(&target_id) {
                                                                let new_token: FirestoreListenerToken = target_change.resume_token.clone().into();
                                                                if let Err(err) = storage.update_resume_token(&target.target, new_token.clone()).await {
                                                                    error!(%err, "Listener token storage error occurred.");
                                                                    break;
                                                                }
                                                                else {
                                                                    target.resume_type = Some(FirestoreListenerTargetResumeType::Token(new_token))
                                                                }
                                                            }
                                                        },
                                                        Err(err) => {
                                                            error!(%err, target_id_num, "Listener system error - unexpected target ID.");
                                                            break;
                                                        }
                                                    }
                                                }
                                            }
                                            // Any other event is forwarded to the user callback.
                                            Some(response_type) => {
                                                if let Err(err) = cb(response_type).await {
                                                    error!(%err, "Listener callback function error occurred.");
                                                    break;
                                                }
                                            }
                                            None => {}
                                        }
                                    }
                                    // Stream finished normally: reconnect via the outer loop.
                                    Ok(None) => break,
                                    Err(err) => {
                                        if Self::check_listener_if_permanent_error(err, effective_delay).await {
                                            shutdown_flag.store(true, Ordering::Relaxed);
                                        }
                                        break;
                                    }
                                }
                            }
                        }
                    }
                },
            }
        }
    }
    /// Classifies a listen error: returns `true` for permanent errors (the
    /// listener should stop) and `false` for transient ones, after sleeping
    /// for the retry `delay` in the transient case.
    ///
    /// NOTE: the match arms are order-sensitive — the EOF/stream-reset guard
    /// must be checked before the generic `DatabaseError` handling.
    async fn check_listener_if_permanent_error(
        err: FirestoreError,
        delay: std::time::Duration,
    ) -> bool {
        match err {
            // Stream EOF/reset from the server: happens periodically; retry.
            FirestoreError::DatabaseError(ref db_err)
                if db_err.details.contains("unexpected end of file")
                    || db_err.details.contains("stream error received") =>
            {
                debug!(%err, ?delay, "Listen EOF.. Restarting after the specified delay...");
                tokio::time::sleep(delay).await;
                false
            }
            // Bad request: retrying would fail identically, so give up.
            FirestoreError::DatabaseError(ref db_err)
                if db_err.public.code.contains("InvalidArgument") =>
            {
                error!(%err, "Listen error. Exiting...");
                true
            }
            FirestoreError::InvalidParametersError(_) => {
                error!(%err, "Listen error. Exiting...");
                true
            }
            // Anything else is treated as transient.
            _ => {
                error!(%err, ?delay, "Listen error. Restarting after the specified delay...");
                tokio::time::sleep(delay).await;
                false
            }
        }
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/update.rs | src/db/update.rs | use crate::db::safe_document_path;
use crate::{FirestoreDb, FirestoreResult, FirestoreWritePrecondition};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use gcloud_sdk::google::firestore::v1::*;
use serde::{Deserialize, Serialize};
use tracing::*;
#[async_trait]
pub trait FirestoreUpdateSupport {
async fn update_obj<I, O, S>(
&self,
collection_id: &str,
document_id: S,
obj: &I,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send;
async fn update_obj_at<I, O, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
obj: &I,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send;
async fn update_doc(
&self,
collection_id: &str,
firestore_doc: Document,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<Document>;
}
#[async_trait]
impl FirestoreUpdateSupport for FirestoreDb {
async fn update_obj<I, O, S>(
&self,
collection_id: &str,
document_id: S,
obj: &I,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send,
{
self.update_obj_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
obj,
update_only,
return_only_fields,
precondition,
)
.await
}
async fn update_obj_at<I, O, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
obj: &I,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send,
{
let firestore_doc = Self::serialize_to_doc(
safe_document_path(parent, collection_id, document_id.as_ref())?.as_str(),
obj,
)?;
let doc = self
.update_doc(
collection_id,
firestore_doc,
update_only,
return_only_fields,
precondition,
)
.await?;
Self::deserialize_doc_to(&doc)
}
async fn update_doc(
&self,
collection_id: &str,
firestore_doc: Document,
update_only: Option<Vec<String>>,
return_only_fields: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<Document> {
let document_id = firestore_doc.name.clone();
let span = span!(
Level::DEBUG,
"Firestore Update Document",
"/firestore/collection_name" = collection_id,
"/firestore/document_name" = document_id,
"/firestore/response_time" = field::Empty,
);
let update_document_request = gcloud_sdk::tonic::Request::new(UpdateDocumentRequest {
update_mask: update_only.map({
|vf| DocumentMask {
field_paths: vf.iter().map(|f| f.to_string()).collect(),
}
}),
document: Some(firestore_doc),
mask: return_only_fields.as_ref().map(|masks| DocumentMask {
field_paths: masks.clone(),
}),
current_document: precondition.map(|cond| cond.try_into()).transpose()?,
});
let begin_query_utc: DateTime<Utc> = Utc::now();
let update_response = self
.client()
.get()
.update_document(update_document_request)
.await?;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
span.record(
"/firestore/response_time",
query_duration.num_milliseconds(),
);
span.in_scope(|| {
debug!(collection_id, document_id, "Updated the document.");
});
Ok(update_response.into_inner())
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/batch_simple_writer.rs | src/db/batch_simple_writer.rs | use crate::errors::*;
use crate::{
FirestoreBatch, FirestoreBatchWriteResponse, FirestoreBatchWriter, FirestoreDb,
FirestoreResult, FirestoreWriteResult,
};
use async_trait::async_trait;
use futures::TryFutureExt;
use gcloud_sdk::google::firestore::v1::{BatchWriteRequest, Write};
use rsb_derive::*;
use std::collections::HashMap;
use tracing::*;
/// Options for [`FirestoreSimpleBatchWriter`].
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreSimpleBatchWriteOptions {
    /// Upper bound for the exponential-backoff retry window of a batch write.
    /// NOTE(review): `None` appears to leave the backoff builder's elapsed-time
    /// cap at its default — confirm against the `backoff` crate semantics.
    retry_max_elapsed_time: Option<chrono::Duration>,
}
/// A batch writer that issues `BatchWrite` RPCs directly (no streaming),
/// retrying failed attempts with exponential backoff.
pub struct FirestoreSimpleBatchWriter {
    pub db: FirestoreDb,
    pub options: FirestoreSimpleBatchWriteOptions,
    /// Tracing span under which batch writes are recorded.
    pub batch_span: Span,
}
impl FirestoreSimpleBatchWriter {
    /// Creates a new simple (non-streaming) batch writer for the given database.
    pub async fn new(
        db: FirestoreDb,
        options: FirestoreSimpleBatchWriteOptions,
    ) -> FirestoreResult<FirestoreSimpleBatchWriter> {
        Ok(Self {
            batch_span: span!(Level::DEBUG, "Firestore Batch Write"),
            db,
            options,
        })
    }

    /// Starts a new, empty batch bound to this writer.
    pub fn new_batch(&self) -> FirestoreBatch<'_, FirestoreSimpleBatchWriter> {
        FirestoreBatch::new(&self.db, self)
    }
}
#[async_trait]
impl FirestoreBatchWriter for FirestoreSimpleBatchWriter {
    type WriteResult = FirestoreBatchWriteResponse;

    /// Sends the given writes as a single `BatchWrite` RPC, retrying on error
    /// with exponential backoff until `retry_max_elapsed_time` (if set).
    async fn write(&self, writes: Vec<Write>) -> FirestoreResult<FirestoreBatchWriteResponse> {
        let backoff = backoff::ExponentialBackoffBuilder::new()
            .with_max_elapsed_time(
                self.options
                    .retry_max_elapsed_time
                    .map(|v| v.to_std())
                    .transpose()?,
            )
            .build();
        let request = BatchWriteRequest {
            database: self.db.get_database_path().to_string(),
            writes,
            labels: HashMap::new(),
        };
        backoff::future::retry(backoff, || {
            async {
                let response = self
                    .db
                    .client()
                    .get()
                    .batch_write(request.clone())
                    .await
                    .map_err(FirestoreError::from)?;
                let batch_response = response.into_inner();
                // Convert each per-write result; the first conversion error fails the call.
                let write_results: FirestoreResult<Vec<FirestoreWriteResult>> = batch_response
                    .write_results
                    .into_iter()
                    .map(|s| s.try_into())
                    .collect();
                // NOTE(review): the hardcoded `0` is the first argument to
                // `FirestoreBatchWriteResponse::new` — presumably a batch
                // position/ordinal unused for simple batches; confirm against
                // the response type's definition.
                Ok(FirestoreBatchWriteResponse::new(
                    0,
                    write_results?,
                    batch_response.status,
                ))
            }
            .map_err(firestore_err_to_backoff)
        })
        .await
    }
}
impl FirestoreDb {
    /// Creates a simple batch writer with default options.
    pub async fn create_simple_batch_writer(&self) -> FirestoreResult<FirestoreSimpleBatchWriter> {
        let default_options = FirestoreSimpleBatchWriteOptions::new();
        self.create_simple_batch_writer_with_options(default_options)
            .await
    }

    /// Creates a simple batch writer with the supplied options.
    pub async fn create_simple_batch_writer_with_options(
        &self,
        options: FirestoreSimpleBatchWriteOptions,
    ) -> FirestoreResult<FirestoreSimpleBatchWriter> {
        FirestoreSimpleBatchWriter::new(self.clone(), options).await
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/query_models.rs | src/db/query_models.rs | // Allow derive_partial_eq_without_eq because some of these types wrap generated gRPC types
// that might not implement Eq, or their Eq implementation might change.
#![allow(clippy::derive_partial_eq_without_eq)]
use crate::errors::{
FirestoreError, FirestoreInvalidParametersError, FirestoreInvalidParametersPublicDetails,
};
use crate::{FirestoreValue, FirestoreVector};
use gcloud_sdk::google::firestore::v1::*;
use rsb_derive::Builder;
/// Specifies the target collection(s) for a Firestore query.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreQueryCollection {
/// Queries a single collection identified by its ID.
Single(String),
/// Performs a collection group query across all collections with the specified ID(s).
/// While Firestore gRPC supports multiple collection IDs here, typically a collection group query
/// targets all collections with *one* specific ID.
Group(Vec<String>),
}
#[allow(clippy::to_string_trait_impl)]
impl ToString for FirestoreQueryCollection {
fn to_string(&self) -> String {
match self {
FirestoreQueryCollection::Single(single) => single.to_string(),
FirestoreQueryCollection::Group(group) => group.join(","),
}
}
}
impl From<&str> for FirestoreQueryCollection {
fn from(collection_id_str: &str) -> Self {
FirestoreQueryCollection::Single(collection_id_str.to_string())
}
}
/// Parameters for constructing and executing a Firestore query.
///
/// This struct encapsulates all configurable aspects of a query, such as the
/// target collection, filters, ordering, limits, offsets, cursors, and projections.
/// It is used by the fluent API and direct query methods to define the query to be sent to Firestore.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreQueryParams {
/// The parent resource path. For top-level collections, this is typically
/// the database path (e.g., "projects/my-project/databases/(default)/documents").
/// For sub-collections, it's the path to the parent document.
/// If `None`, the query is assumed to be on a top-level collection relative to the
/// `FirestoreDb`'s document path.
pub parent: Option<String>,
/// The ID of the collection or collection group to query.
pub collection_id: FirestoreQueryCollection,
/// The maximum number of results to return.
pub limit: Option<u32>,
/// The number of results to skip.
pub offset: Option<u32>,
/// A list of fields and directions to order the results by.
pub order_by: Option<Vec<FirestoreQueryOrder>>,
/// The filter to apply to the query.
pub filter: Option<FirestoreQueryFilter>,
/// If `true`, the query will search all collections located anywhere in the
/// database under the `parent` path (if specified) that have the given
/// `collection_id`. This is used for collection group queries.
/// Defaults to `false` if not set, meaning only direct children collections are queried.
pub all_descendants: Option<bool>,
/// If set, only these fields will be returned in the query results (projection).
/// If `None`, all fields are returned.
pub return_only_fields: Option<Vec<String>>,
/// A cursor to define the starting point of the query.
pub start_at: Option<FirestoreQueryCursor>,
/// A cursor to define the ending point of the query.
pub end_at: Option<FirestoreQueryCursor>,
/// Options for requesting an explanation of the query execution plan.
pub explain_options: Option<FirestoreExplainOptions>,
/// Options for performing a vector similarity search (find nearest neighbors).
pub find_nearest: Option<FirestoreFindNearestOptions>,
}
impl TryFrom<FirestoreQueryParams> for StructuredQuery {
type Error = FirestoreError;
fn try_from(params: FirestoreQueryParams) -> Result<Self, Self::Error> {
let query_filter = params.filter.map(|f| f.into());
Ok(StructuredQuery {
select: params.return_only_fields.map(|select_only_fields| {
structured_query::Projection {
fields: select_only_fields
.into_iter()
.map(|field_name| structured_query::FieldReference {
field_path: field_name,
})
.collect(),
}
}),
start_at: params.start_at.map(|start_at| start_at.into()),
end_at: params.end_at.map(|end_at| end_at.into()),
limit: params.limit.map(|x| x as i32),
offset: params.offset.map(|x| x as i32).unwrap_or(0),
order_by: params
.order_by
.map(|po| po.into_iter().map(|fo| fo.into()).collect())
.unwrap_or_default(),
from: match params.collection_id {
FirestoreQueryCollection::Single(collection_id) => {
vec![structured_query::CollectionSelector {
collection_id,
all_descendants: params.all_descendants.unwrap_or(false),
}]
}
FirestoreQueryCollection::Group(collection_ids) => collection_ids
.into_iter()
.map(|collection_id| structured_query::CollectionSelector {
collection_id,
all_descendants: params.all_descendants.unwrap_or(false),
})
.collect(),
},
find_nearest: params
.find_nearest
.map(|find_nearest| find_nearest.try_into())
.transpose()?,
r#where: query_filter,
})
}
}
/// Represents a filter condition for a Firestore query.
///
/// Filters are used to narrow down the documents returned by a query based on
/// conditions applied to their fields.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreQueryFilter {
/// A composite filter that combines multiple sub-filters using an operator (AND/OR).
Composite(FirestoreQueryFilterComposite),
/// A unary filter that applies an operation to a single field (e.g., IS NULL, IS NAN).
Unary(FirestoreQueryFilterUnary),
/// A field filter that compares a field to a value (e.g., equality, greater than).
/// The `Option` allows for representing an effectively empty or no-op filter,
/// which can be useful in dynamic filter construction.
Compare(Option<FirestoreQueryFilterCompare>),
}
impl From<FirestoreQueryFilter> for structured_query::Filter {
fn from(filter: FirestoreQueryFilter) -> Self {
let filter_type = match filter {
FirestoreQueryFilter::Compare(comp) => comp.map(|cmp| {
structured_query::filter::FilterType::FieldFilter(match cmp {
FirestoreQueryFilterCompare::Equal(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::Equal.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::NotEqual(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::NotEqual.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::In(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::In.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::NotIn(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::NotIn.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::ArrayContains(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::ArrayContains.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::ArrayContainsAny(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::ArrayContainsAny.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::LessThan(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::LessThan.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::LessThanOrEqual(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::LessThanOrEqual.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::GreaterThan(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::GreaterThan.into(),
value: Some(fvalue.value),
}
}
FirestoreQueryFilterCompare::GreaterThanOrEqual(field_name, fvalue) => {
structured_query::FieldFilter {
field: Some(structured_query::FieldReference {
field_path: field_name,
}),
op: structured_query::field_filter::Operator::GreaterThanOrEqual.into(),
value: Some(fvalue.value),
}
}
})
}),
FirestoreQueryFilter::Composite(composite) => {
Some(structured_query::filter::FilterType::CompositeFilter(
structured_query::CompositeFilter {
op: (Into::<structured_query::composite_filter::Operator>::into(
composite.operator,
))
.into(),
filters: composite
.for_all_filters
.into_iter()
.map(structured_query::Filter::from)
.filter(|filter| filter.filter_type.is_some())
.collect(),
},
))
}
FirestoreQueryFilter::Unary(unary) => match unary {
FirestoreQueryFilterUnary::IsNan(field_name) => {
Some(structured_query::filter::FilterType::UnaryFilter(
structured_query::UnaryFilter {
op: structured_query::unary_filter::Operator::IsNan.into(),
operand_type: Some(structured_query::unary_filter::OperandType::Field(
structured_query::FieldReference {
field_path: field_name,
},
)),
},
))
}
FirestoreQueryFilterUnary::IsNull(field_name) => {
Some(structured_query::filter::FilterType::UnaryFilter(
structured_query::UnaryFilter {
op: structured_query::unary_filter::Operator::IsNull.into(),
operand_type: Some(structured_query::unary_filter::OperandType::Field(
structured_query::FieldReference {
field_path: field_name,
},
)),
},
))
}
FirestoreQueryFilterUnary::IsNotNan(field_name) => {
Some(structured_query::filter::FilterType::UnaryFilter(
structured_query::UnaryFilter {
op: structured_query::unary_filter::Operator::IsNotNan.into(),
operand_type: Some(structured_query::unary_filter::OperandType::Field(
structured_query::FieldReference {
field_path: field_name,
},
)),
},
))
}
FirestoreQueryFilterUnary::IsNotNull(field_name) => {
Some(structured_query::filter::FilterType::UnaryFilter(
structured_query::UnaryFilter {
op: structured_query::unary_filter::Operator::IsNotNull.into(),
operand_type: Some(structured_query::unary_filter::OperandType::Field(
structured_query::FieldReference {
field_path: field_name,
},
)),
},
))
}
},
};
structured_query::Filter { filter_type }
}
}
/// Specifies an ordering for query results based on a field.
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreQueryOrder {
/// The path to the field to order by (e.g., "name", "address.city").
pub field_name: String,
/// The direction of the ordering (ascending or descending).
pub direction: FirestoreQueryDirection,
}
impl FirestoreQueryOrder {
/// Returns a string representation of the order, e.g., "fieldName asc".
pub fn to_string_format(&self) -> String {
format!("{} {}", self.field_name, self.direction.to_string())
}
}
impl<S> From<(S, FirestoreQueryDirection)> for FirestoreQueryOrder
where
S: AsRef<str>,
{
fn from(field_order: (S, FirestoreQueryDirection)) -> Self {
FirestoreQueryOrder::new(field_order.0.as_ref().to_string(), field_order.1)
}
}
impl From<FirestoreQueryOrder> for structured_query::Order {
fn from(order: FirestoreQueryOrder) -> Self {
structured_query::Order {
field: Some(structured_query::FieldReference {
field_path: order.field_name,
}),
direction: (match order.direction {
FirestoreQueryDirection::Ascending => structured_query::Direction::Ascending.into(),
FirestoreQueryDirection::Descending => {
structured_query::Direction::Descending.into()
}
}),
}
}
}
/// The direction for ordering query results.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreQueryDirection {
/// Sort results in ascending order.
Ascending,
/// Sort results in descending order.
Descending,
}
#[allow(clippy::to_string_trait_impl)]
impl ToString for FirestoreQueryDirection {
fn to_string(&self) -> String {
match self {
FirestoreQueryDirection::Ascending => "asc".to_string(),
FirestoreQueryDirection::Descending => "desc".to_string(),
}
}
}
/// A composite filter that combines multiple [`FirestoreQueryFilter`]s.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreQueryFilterComposite {
/// The list of sub-filters to combine.
pub for_all_filters: Vec<FirestoreQueryFilter>,
/// The operator used to combine the sub-filters (AND/OR).
pub operator: FirestoreQueryFilterCompositeOperator,
}
/// The operator for combining filters in a [`FirestoreQueryFilterComposite`].
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreQueryFilterCompositeOperator {
/// Logical AND: all sub-filters must be true.
And,
/// Logical OR: at least one sub-filter must be true.
Or,
}
impl From<FirestoreQueryFilterCompositeOperator> for structured_query::composite_filter::Operator {
fn from(operator: FirestoreQueryFilterCompositeOperator) -> Self {
match operator {
FirestoreQueryFilterCompositeOperator::And => {
structured_query::composite_filter::Operator::And
}
FirestoreQueryFilterCompositeOperator::Or => {
structured_query::composite_filter::Operator::Or
}
}
}
}
/// A unary filter that applies an operation to a single field.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreQueryFilterUnary {
/// Checks if a field's value is NaN (Not a Number).
/// The string argument is the field path.
IsNan(String),
/// Checks if a field's value is NULL.
/// The string argument is the field path.
IsNull(String),
/// Checks if a field's value is not NaN.
/// The string argument is the field path.
IsNotNan(String),
/// Checks if a field's value is not NULL.
/// The string argument is the field path.
IsNotNull(String),
}
/// A field filter that compares a field to a value using a specific operator.
/// The first `String` argument in each variant is the field path.
/// The `FirestoreValue` is the value to compare against.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreQueryFilterCompare {
/// Field is less than the value.
LessThan(String, FirestoreValue),
/// Field is less than or equal to the value.
LessThanOrEqual(String, FirestoreValue),
/// Field is greater than the value.
GreaterThan(String, FirestoreValue),
/// Field is greater than or equal to the value.
GreaterThanOrEqual(String, FirestoreValue),
/// Field is equal to the value.
Equal(String, FirestoreValue),
/// Field is not equal to the value.
NotEqual(String, FirestoreValue),
/// Field (which must be an array) contains the value.
ArrayContains(String, FirestoreValue),
/// Field's value is IN the given array value. The `FirestoreValue` should be an array.
In(String, FirestoreValue),
/// Field (which must be an array) contains any of the values in the given array value.
/// The `FirestoreValue` should be an array.
ArrayContainsAny(String, FirestoreValue),
/// Field's value is NOT IN the given array value. The `FirestoreValue` should be an array.
NotIn(String, FirestoreValue),
}
/// Represents a cursor for paginating query results.
///
/// Cursors define a starting or ending point for a query based on the values
/// of the fields being ordered by.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreQueryCursor {
/// Starts the query results before the document that has these field values.
/// The `Vec<FirestoreValue>` corresponds to the values of the ordered fields.
BeforeValue(Vec<FirestoreValue>),
/// Starts the query results after the document that has these field values.
/// The `Vec<FirestoreValue>` corresponds to the values of the ordered fields.
AfterValue(Vec<FirestoreValue>),
}
impl From<FirestoreQueryCursor> for gcloud_sdk::google::firestore::v1::Cursor {
fn from(cursor: FirestoreQueryCursor) -> Self {
match cursor {
FirestoreQueryCursor::BeforeValue(values) => {
gcloud_sdk::google::firestore::v1::Cursor {
values: values.into_iter().map(|value| value.value).collect(),
before: true,
}
}
FirestoreQueryCursor::AfterValue(values) => gcloud_sdk::google::firestore::v1::Cursor {
values: values.into_iter().map(|value| value.value).collect(),
before: false,
},
}
}
}
impl From<gcloud_sdk::google::firestore::v1::Cursor> for FirestoreQueryCursor {
fn from(cursor: gcloud_sdk::google::firestore::v1::Cursor) -> Self {
let firestore_values = cursor
.values
.into_iter()
.map(FirestoreValue::from)
.collect();
if cursor.before {
FirestoreQueryCursor::BeforeValue(firestore_values)
} else {
FirestoreQueryCursor::AfterValue(firestore_values)
}
}
}
/// Parameters for a partitioned query.
///
/// Partitioned queries allow you to divide a large query into smaller, parallelizable chunks.
/// This is useful for exporting data or performing large-scale data processing.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestorePartitionQueryParams {
/// The base query parameters to partition.
pub query_params: FirestoreQueryParams,
/// The desired number of partitions to return. Must be a positive integer.
pub partition_count: u32,
/// The maximum number of partitions to return in this call, used for paging.
/// Must be a positive integer.
pub page_size: u32,
/// A page token from a previous `PartitionQuery` response to retrieve the next set of partitions.
pub page_token: Option<String>,
}
/// Represents a single partition of a query.
///
/// Each partition defines a range of the original query using `start_at` and `end_at` cursors.
/// Executing a query with these cursors will yield the documents for that specific partition.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestorePartition {
/// The cursor indicating the start of this partition.
pub start_at: Option<FirestoreQueryCursor>,
/// The cursor indicating the end of this partition.
pub end_at: Option<FirestoreQueryCursor>,
}
/// Options for requesting query execution analysis from Firestore.
///
/// When `analyze` is true, Firestore will return detailed information about
/// how the query was executed, including index usage and performance metrics.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreExplainOptions {
/// If `true`, Firestore will analyze the query and return execution details.
/// Defaults to `false` if not specified.
pub analyze: Option<bool>,
}
impl TryFrom<&FirestoreExplainOptions> for gcloud_sdk::google::firestore::v1::ExplainOptions {
type Error = FirestoreError;
fn try_from(explain_options: &FirestoreExplainOptions) -> Result<Self, Self::Error> {
Ok(ExplainOptions {
analyze: explain_options.analyze.unwrap_or(false),
})
}
}
/// Options for performing a vector similarity search (find nearest neighbors).
///
/// This is used to find documents whose vector field is closest to a given query vector.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreFindNearestOptions {
/// The path to the vector field in your documents to search against.
pub field_name: String,
/// The query vector to find nearest neighbors for.
pub query_vector: FirestoreVector,
/// The distance measure to use for comparing vectors.
pub distance_measure: FirestoreFindNearestDistanceMeasure,
/// The maximum number of nearest neighbors to return.
pub neighbors_limit: u32,
/// An optional field name to store the calculated distance in the query results.
/// If provided, each returned document will include this field with the distance value.
pub distance_result_field: Option<String>,
/// An optional threshold for the distance. Only neighbors within this distance
/// will be returned.
pub distance_threshold: Option<f64>,
}
impl TryFrom<FirestoreFindNearestOptions>
for gcloud_sdk::google::firestore::v1::structured_query::FindNearest
{
type Error = FirestoreError;
fn try_from(options: FirestoreFindNearestOptions) -> Result<Self, Self::Error> {
Ok(structured_query::FindNearest {
vector_field: Some(structured_query::FieldReference {
field_path: options.field_name,
}),
query_vector: Some(Into::<FirestoreValue>::into(options.query_vector).value),
distance_measure: {
let distance_measure: structured_query::find_nearest::DistanceMeasure =
options.distance_measure.try_into()?;
distance_measure.into()
},
limit: Some(options.neighbors_limit.try_into().map_err(|e| {
FirestoreError::InvalidParametersError(FirestoreInvalidParametersError::new(
FirestoreInvalidParametersPublicDetails::new(
"neighbors_limit".to_string(),
format!(
"Invalid value for neighbors_limit: {}. Maximum allowed value is {}. Error: {}",
options.neighbors_limit,
i32::MAX,
e
),
),
))
})?),
distance_result_field: options.distance_result_field.unwrap_or_default(),
distance_threshold: options.distance_threshold,
})
}
}
/// Specifies the distance measure for vector similarity searches.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreFindNearestDistanceMeasure {
/// Euclidean distance.
Euclidean,
/// Cosine similarity (measures the cosine of the angle between two vectors).
Cosine,
/// Dot product distance.
DotProduct,
}
impl TryFrom<FirestoreFindNearestDistanceMeasure>
for structured_query::find_nearest::DistanceMeasure
{
type Error = FirestoreError;
fn try_from(measure: FirestoreFindNearestDistanceMeasure) -> Result<Self, Self::Error> {
match measure {
FirestoreFindNearestDistanceMeasure::Euclidean => {
Ok(structured_query::find_nearest::DistanceMeasure::Euclidean)
}
FirestoreFindNearestDistanceMeasure::Cosine => {
Ok(structured_query::find_nearest::DistanceMeasure::Cosine)
}
FirestoreFindNearestDistanceMeasure::DotProduct => {
Ok(structured_query::find_nearest::DistanceMeasure::DotProduct)
}
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/list.rs | src/db/list.rs | use crate::db::FirestoreDbInner;
use crate::*;
use async_trait::async_trait;
use chrono::prelude::*;
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::FutureExt;
use futures::StreamExt;
use futures::TryFutureExt;
use futures::TryStreamExt;
use gcloud_sdk::google::firestore::v1::*;
use rand::Rng;
use rsb_derive::*;
use serde::Deserialize;
use std::future;
use std::sync::Arc;
use tracing::*;
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreListDocParams {
pub collection_id: String,
pub parent: Option<String>,
#[default = "100"]
pub page_size: usize,
pub page_token: Option<String>,
pub order_by: Option<Vec<FirestoreQueryOrder>>,
pub return_only_fields: Option<Vec<String>>,
}
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreListDocResult {
pub documents: Vec<Document>,
pub page_token: Option<String>,
}
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreListCollectionIdsParams {
pub parent: Option<String>,
#[default = "100"]
pub page_size: usize,
pub page_token: Option<String>,
}
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreListCollectionIdsResult {
pub collection_ids: Vec<String>,
pub page_token: Option<String>,
}
#[async_trait]
pub trait FirestoreListingSupport {
async fn list_doc(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<FirestoreListDocResult>;
async fn stream_list_doc<'b>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, Document>>;
async fn stream_list_doc_with_errors<'b>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>>;
async fn stream_list_obj<'b, T>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, T>>
where
for<'de> T: Deserialize<'de> + 'b;
async fn stream_list_obj_with_errors<'b, T>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
where
for<'de> T: Deserialize<'de> + 'b;
async fn list_collection_ids(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<FirestoreListCollectionIdsResult>;
async fn stream_list_collection_ids_with_errors(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<BoxStream<FirestoreResult<String>>>;
async fn stream_list_collection_ids(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<BoxStream<String>>;
}
#[async_trait]
impl FirestoreListingSupport for FirestoreDb {
async fn list_doc(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<FirestoreListDocResult> {
let span = span!(
Level::DEBUG,
"Firestore ListDocs",
"/firestore/collection_name" = params.collection_id.as_str(),
"/firestore/response_time" = field::Empty
);
self.list_doc_with_retries(params, 0, span).await
}
async fn stream_list_doc_with_errors<'b>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
self.stream_list_doc_with_retries(params).await
}
async fn stream_list_doc<'b>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, Document>> {
let doc_stream = self.stream_list_doc_with_errors(params).await?;
Ok(Box::pin(doc_stream.filter_map(|doc_res| {
future::ready(match doc_res {
Ok(doc) => Some(doc),
Err(err) => {
error!(%err, "Error occurred while consuming documents.");
None
}
})
})))
}
async fn stream_list_obj<'b, T>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, T>>
where
for<'de> T: Deserialize<'de> + 'b,
{
let doc_stream = self.stream_list_doc(params).await?;
Ok(Box::pin(doc_stream.filter_map(|doc| async move {
match Self::deserialize_doc_to::<T>(&doc) {
Ok(obj) => Some(obj),
Err(err) => {
error!(
%err,
"Error occurred while deserializing a document inside a stream. Document: {}",
doc.name
);
None
}
}
})))
}
async fn stream_list_obj_with_errors<'b, T>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
where
for<'de> T: Deserialize<'de> + 'b,
{
let doc_stream = self.stream_list_doc_with_errors(params).await?;
Ok(Box::pin(doc_stream.and_then(|doc| async move {
Self::deserialize_doc_to::<T>(&doc)
})))
}
async fn list_collection_ids(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<FirestoreListCollectionIdsResult> {
let span = span!(
Level::DEBUG,
"Firestore ListCollectionIds",
"/firestore/response_time" = field::Empty
);
self.list_collection_ids_with_retries(params, 0, &span)
.await
}
async fn stream_list_collection_ids(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<BoxStream<String>> {
let stream = self.stream_list_collection_ids_with_errors(params).await?;
Ok(Box::pin(stream.filter_map(|col_res| {
future::ready(match col_res {
Ok(col) => Some(col),
Err(err) => {
error!(%err, "Error occurred while consuming collection IDs.");
None
}
})
})))
}
async fn stream_list_collection_ids_with_errors(
&self,
params: FirestoreListCollectionIdsParams,
) -> FirestoreResult<BoxStream<FirestoreResult<String>>> {
let stream: BoxStream<FirestoreResult<String>> = Box::pin(
futures::stream::unfold(Some(params), move |maybe_params| async move {
if let Some(params) = maybe_params {
let span = span!(
Level::DEBUG,
"Firestore Streaming ListCollections",
"/firestore/response_time" = field::Empty
);
match self
.list_collection_ids_with_retries(params.clone(), 0, &span)
.await
{
Ok(results) => {
if let Some(next_page_token) = results.page_token.clone() {
Some((Ok(results), Some(params.with_page_token(next_page_token))))
} else {
Some((Ok(results), None))
}
}
Err(err) => {
error!(%err, "Error occurred while consuming documents.");
Some((Err(err), None))
}
}
} else {
None
}
})
.flat_map(|doc_res| {
futures::stream::iter(match doc_res {
Ok(results) => results
.collection_ids
.into_iter()
.map(Ok::<String, FirestoreError>)
.collect(),
Err(err) => vec![Err(err)],
})
}),
);
Ok(stream)
}
}
impl FirestoreDb {
fn create_list_doc_request(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<ListDocumentsRequest> {
Ok(ListDocumentsRequest {
parent: params
.parent
.as_ref()
.unwrap_or_else(|| self.get_documents_path())
.clone(),
collection_id: params.collection_id,
page_size: params.page_size as i32,
page_token: params.page_token.unwrap_or_default(),
order_by: params
.order_by
.map(|fields| {
fields
.into_iter()
.map(|field| field.to_string_format())
.collect::<Vec<String>>()
.join(", ")
})
.unwrap_or_default(),
mask: params
.return_only_fields
.map(|masks| DocumentMask { field_paths: masks }),
consistency_selector: self
.session_params
.consistency_selector
.as_ref()
.map(|selector| selector.try_into())
.transpose()?,
show_missing: false,
})
}
fn list_doc_with_retries<'b>(
&self,
params: FirestoreListDocParams,
retries: usize,
span: Span,
) -> BoxFuture<'b, FirestoreResult<FirestoreListDocResult>> {
match self.create_list_doc_request(params) {
Ok(list_request) => {
Self::list_doc_with_retries_inner(self.inner.clone(), list_request, retries, span)
.boxed()
}
Err(err) => futures::future::err(err).boxed(),
}
}
fn list_doc_with_retries_inner<'b>(
db_inner: Arc<FirestoreDbInner>,
list_request: ListDocumentsRequest,
retries: usize,
span: Span,
) -> BoxFuture<'b, FirestoreResult<FirestoreListDocResult>> {
async move {
let begin_utc: DateTime<Utc> = Utc::now();
match db_inner.client.get()
.list_documents(
gcloud_sdk::tonic::Request::new(list_request.clone())
)
.map_err(|e| e.into())
.await
{
Ok(listing_response) => {
let list_inner = listing_response.into_inner();
let result = FirestoreListDocResult::new(list_inner.documents).opt_page_token(
if !list_inner.next_page_token.is_empty() {
Some(list_inner.next_page_token)
} else {
None
},
);
let end_query_utc: DateTime<Utc> = Utc::now();
let listing_duration = end_query_utc.signed_duration_since(begin_utc);
span.record(
"/firestore/response_time",
listing_duration.num_milliseconds(),
);
span.in_scope(|| {
debug!(
collection_id = list_request.collection_id.as_str(),
duration_milliseconds = listing_duration.num_milliseconds(),
num_documents = result.documents.len(),
"Listed documents.",
);
});
Ok(result)
}
Err(err) => match err {
FirestoreError::DatabaseError(ref db_err)
if db_err.retry_possible && retries < db_inner.options.max_retries =>
{
let sleep_duration = tokio::time::Duration::from_millis(
rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
);
warn!(
err = %db_err,
current_retry = retries + 1,
max_retries = db_inner.options.max_retries,
delay = sleep_duration.as_millis(),
"Failed to list documents. Retrying up to the specified number of times.",
);
tokio::time::sleep(sleep_duration).await;
Self::list_doc_with_retries_inner(db_inner, list_request, retries + 1, span).await
}
_ => Err(err),
},
}
}
.boxed()
}
async fn stream_list_doc_with_retries<'b>(
&self,
params: FirestoreListDocParams,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
#[cfg(feature = "caching")]
{
if let FirestoreCachedValue::UseCached(stream) =
self.list_docs_from_cache(¶ms).await?
{
return Ok(stream);
}
}
let list_request = self.create_list_doc_request(params.clone())?;
Self::stream_list_doc_with_retries_inner(self.inner.clone(), list_request)
}
fn stream_list_doc_with_retries_inner<'b>(
db_inner: Arc<FirestoreDbInner>,
list_request: ListDocumentsRequest,
) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
let stream: BoxStream<FirestoreResult<Document>> = Box::pin(
futures::stream::unfold(
(db_inner, Some(list_request)),
move |(db_inner, list_request)| async move {
if let Some(mut list_request) = list_request {
let span = span!(
Level::DEBUG,
"Firestore Streaming ListDocs",
"/firestore/collection_name" = list_request.collection_id.as_str(),
"/firestore/response_time" = field::Empty
);
match Self::list_doc_with_retries_inner(
db_inner.clone(),
list_request.clone(),
0,
span,
)
.await
{
Ok(results) => {
if let Some(next_page_token) = results.page_token.clone() {
list_request.page_token = next_page_token;
Some((Ok(results), (db_inner, Some(list_request))))
} else {
Some((Ok(results), (db_inner, None)))
}
}
Err(err) => {
error!(%err, "Error occurred while consuming documents.");
Some((Err(err), (db_inner, None)))
}
}
} else {
None
}
},
)
.flat_map(|doc_res| {
futures::stream::iter(match doc_res {
Ok(results) => results
.documents
.into_iter()
.map(Ok::<Document, FirestoreError>)
.collect(),
Err(err) => vec![Err(err)],
})
}),
);
Ok(stream)
}
fn create_list_collection_ids_request(
&self,
params: &FirestoreListCollectionIdsParams,
) -> FirestoreResult<gcloud_sdk::tonic::Request<ListCollectionIdsRequest>> {
Ok(gcloud_sdk::tonic::Request::new(ListCollectionIdsRequest {
parent: params
.parent
.as_ref()
.unwrap_or_else(|| self.get_documents_path())
.clone(),
page_size: params.page_size as i32,
page_token: params.page_token.clone().unwrap_or_default(),
consistency_selector: self
.session_params
.consistency_selector
.as_ref()
.map(|selector| selector.try_into())
.transpose()?,
}))
}
fn list_collection_ids_with_retries<'a>(
&'a self,
params: FirestoreListCollectionIdsParams,
retries: usize,
span: &'a Span,
) -> BoxFuture<'a, FirestoreResult<FirestoreListCollectionIdsResult>> {
async move {
let list_request = self.create_list_collection_ids_request(¶ms)?;
let begin_utc: DateTime<Utc> = Utc::now();
match self
.client()
.get()
.list_collection_ids(list_request)
.map_err(|e| e.into())
.await
{
Ok(listing_response) => {
let list_inner = listing_response.into_inner();
let result = FirestoreListCollectionIdsResult::new(list_inner.collection_ids)
.opt_page_token(if !list_inner.next_page_token.is_empty() {
Some(list_inner.next_page_token)
} else {
None
});
let end_query_utc: DateTime<Utc> = Utc::now();
let listing_duration = end_query_utc.signed_duration_since(begin_utc);
span.record(
"/firestore/response_time",
listing_duration.num_milliseconds(),
);
span.in_scope(|| {
debug!(
duration_milliseconds = listing_duration.num_milliseconds(),
"Listed collections.",
);
});
Ok(result)
}
Err(err) => match err {
FirestoreError::DatabaseError(ref db_err)
if db_err.retry_possible && retries < self.inner.options.max_retries =>
{
let sleep_duration = tokio::time::Duration::from_millis(
rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
);
warn!(
err = %db_err,
current_retry = retries + 1,
max_retries = self.inner.options.max_retries,
delay = sleep_duration.as_millis(),
"Failed to list collection IDs. Retrying up to the specified number of times.",
);
tokio::time::sleep(sleep_duration).await;
self.list_collection_ids_with_retries(params, retries + 1, span)
.await
}
_ => Err(err),
},
}
}
.boxed()
}
#[cfg(feature = "caching")]
#[inline]
pub async fn list_docs_from_cache<'b>(
&self,
params: &FirestoreListDocParams,
) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
{
if let FirestoreDbSessionCacheMode::ReadCachedOnly(ref cache) =
self.session_params.cache_mode
{
let span = span!(
Level::DEBUG,
"Firestore List Cached",
"/firestore/collection_name" = params.collection_id,
"/firestore/cache_result" = field::Empty,
"/firestore/response_time" = field::Empty
);
let begin_query_utc: DateTime<Utc> = Utc::now();
let collection_path = if let Some(parent) = params.parent.as_ref() {
format!("{}/{}", parent, params.collection_id.as_str())
} else {
format!(
"{}/{}",
self.get_documents_path(),
params.collection_id.as_str()
)
};
let cached_result = cache.list_all_docs(&collection_path).await?;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
span.record(
"/firestore/response_time",
query_duration.num_milliseconds(),
);
match cached_result {
FirestoreCachedValue::UseCached(stream) => {
span.record("/firestore/cache_result", "hit");
span.in_scope(|| {
debug!(
collection_id = params.collection_id,
"Reading all documents from cache."
);
});
Ok(FirestoreCachedValue::UseCached(stream))
}
FirestoreCachedValue::SkipCache => {
span.record("/firestore/cache_result", "miss");
if matches!(
self.session_params.cache_mode,
FirestoreDbSessionCacheMode::ReadCachedOnly(_)
) {
span.in_scope(|| {
debug!(
collection_id = params.collection_id,
"Cache doesn't have suitable documents for specified collection, but cache mode is ReadCachedOnly so returning empty stream.",
);
});
Ok(FirestoreCachedValue::UseCached(Box::pin(
futures::stream::empty(),
)))
} else {
span.in_scope(|| {
debug!(
collection_id = params.collection_id,
"Cache doesn't have suitable documents for specified collection, so skipping cache and reading from Firestore.",
);
});
Ok(FirestoreCachedValue::SkipCache)
}
}
}
} else {
Ok(FirestoreCachedValue::SkipCache)
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/consistency_selector.rs | src/db/consistency_selector.rs | use crate::errors::*;
use crate::timestamp_utils::to_timestamp;
use crate::{FirestoreError, FirestoreTransactionId};
use chrono::prelude::*;
/// Specifies the consistency guarantee for Firestore read operations.
///
/// When performing reads, Firestore offers different consistency models. This enum
/// allows selecting the desired consistency for an operation, typically by associating
/// it with [`FirestoreDbSessionParams`](crate::FirestoreDbSessionParams).
///
/// See Google Cloud documentation for more details on Firestore consistency:
/// [Data consistency](https://cloud.google.com/firestore/docs/concepts/transaction-options#data_consistency)
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreConsistencySelector {
/// Reads documents within an existing transaction.
///
/// When this variant is used, the read operation will be part of the transaction
/// identified by the provided [`FirestoreTransactionId`]. This ensures that all reads
/// within the transaction see a consistent snapshot of the data.
///
/// Note: Not all operations support being part of a transaction (e.g., `PartitionQuery`).
Transaction(FirestoreTransactionId),
/// Reads documents at a specific point in time.
///
/// This ensures that the read operation sees the version of the documents as they
/// existed at or before the given `read_time`. This is useful for reading historical
/// data or ensuring that a sequence of reads sees a consistent snapshot without
/// the overhead of a full transaction. The timestamp must not be older than
/// one hour, with some exceptions for Point-in-Time Recovery enabled databases.
ReadTime(DateTime<Utc>),
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::get_document_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(tid) => Ok(gcloud_sdk::google::firestore::v1::get_document_request::ConsistencySelector::Transaction(tid.clone())),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::get_document_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::batch_get_documents_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(tid) => Ok(gcloud_sdk::google::firestore::v1::batch_get_documents_request::ConsistencySelector::Transaction(tid.clone())),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::batch_get_documents_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::list_documents_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(tid) => Ok(gcloud_sdk::google::firestore::v1::list_documents_request::ConsistencySelector::Transaction(tid.clone())),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::list_documents_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::run_query_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(tid) => Ok(gcloud_sdk::google::firestore::v1::run_query_request::ConsistencySelector::Transaction(tid.clone())),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::run_query_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::partition_query_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(_) => Err(FirestoreError::DatabaseError(
FirestoreDatabaseError::new(FirestoreErrorPublicGenericDetails::new("Unsupported consistency selector".into()),"Unsupported consistency selector".into(), false)
)),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::partition_query_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::run_aggregation_query_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(tid) => Ok(gcloud_sdk::google::firestore::v1::run_aggregation_query_request::ConsistencySelector::Transaction(tid.clone())),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::run_aggregation_query_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::transaction_options::read_only::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(_) => Err(FirestoreError::DatabaseError(
FirestoreDatabaseError::new(FirestoreErrorPublicGenericDetails::new("Unsupported consistency selector".into()),"Unsupported consistency selector".into(), false)
)),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::transaction_options::read_only::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
impl TryFrom<&FirestoreConsistencySelector>
for gcloud_sdk::google::firestore::v1::list_collection_ids_request::ConsistencySelector
{
type Error = FirestoreError;
fn try_from(selector: &FirestoreConsistencySelector) -> Result<Self, Self::Error> {
match selector {
FirestoreConsistencySelector::Transaction(_) => Err(FirestoreError::DatabaseError(
FirestoreDatabaseError::new(FirestoreErrorPublicGenericDetails::new("Unsupported consistency selector".into()),"Unsupported consistency selector".into(), false)
)),
FirestoreConsistencySelector::ReadTime(ts) => Ok(gcloud_sdk::google::firestore::v1::list_collection_ids_request::ConsistencySelector::ReadTime(to_timestamp(*ts)))
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/precondition_models.rs | src/db/precondition_models.rs | use crate::errors::FirestoreError;
use crate::timestamp_utils::to_timestamp;
use chrono::prelude::*;
use gcloud_sdk::google::firestore::v1::Precondition;
/// A precondition on a document, used for conditional write operations in Firestore.
///
/// Preconditions allow you to specify conditions that must be met for a write
/// operation (create, update, delete) to succeed. If the precondition is not met,
/// the operation will fail, typically with a `DataConflictError` or similar.
/// ```
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreWritePrecondition {
/// The target document must exist (if `true`) or must not exist (if `false`).
///
/// - `Exists(true)`: The operation will only succeed if the document already exists.
/// Useful for conditional updates or deletes.
/// - `Exists(false)`: The operation will only succeed if the document does not already exist.
/// Useful for conditional creates (to prevent overwriting).
Exists(bool),
/// The target document must exist and its `update_time` must match the provided timestamp.
///
/// This is used for optimistic concurrency control. The operation will only succeed
/// if the document has not been modified since the specified `update_time`.
/// The `DateTime<Utc>` must be microsecond-aligned, as Firestore timestamps have
/// microsecond precision.
UpdateTime(DateTime<Utc>),
}
impl TryInto<gcloud_sdk::google::firestore::v1::Precondition> for FirestoreWritePrecondition {
type Error = FirestoreError;
fn try_into(self) -> Result<Precondition, Self::Error> {
match self {
FirestoreWritePrecondition::Exists(value) => {
Ok(gcloud_sdk::google::firestore::v1::Precondition {
condition_type: Some(
gcloud_sdk::google::firestore::v1::precondition::ConditionType::Exists(
value,
),
),
})
}
FirestoreWritePrecondition::UpdateTime(value) => {
Ok(gcloud_sdk::google::firestore::v1::Precondition {
condition_type: Some(
gcloud_sdk::google::firestore::v1::precondition::ConditionType::UpdateTime(
to_timestamp(value),
),
),
})
}
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/options.rs | src/db/options.rs | use gcloud_sdk::GoogleEnvironment;
use rsb_derive::Builder;
/// Configuration options for the [`FirestoreDb`](crate::FirestoreDb) client.
///
/// This struct allows customization of various aspects of the Firestore client,
/// such as the project ID, database ID, retry behavior, and API endpoint.
/// It uses the `rsb_derive::Builder` to provide a convenient builder pattern
/// for constructing options.
///
/// # Examples
///
/// ```rust
/// use firestore::FirestoreDbOptions;
///
/// let options = FirestoreDbOptions::new("my-gcp-project-id".to_string())
/// .with_database_id("my-custom-db".to_string())
/// .with_max_retries(5);
///
/// // To use the default database ID:
/// let default_db_options = FirestoreDbOptions::new("my-gcp-project-id".to_string());
/// assert_eq!(default_db_options.database_id, firestore::FIREBASE_DEFAULT_DATABASE_ID);
/// ```
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreDbOptions {
/// The Google Cloud Project ID that owns the Firestore database.
pub google_project_id: String,
/// The ID of the Firestore database. Defaults to `"(default)"`.
/// Use [`FIREBASE_DEFAULT_DATABASE_ID`](crate::FIREBASE_DEFAULT_DATABASE_ID) for the default.
#[default = "FIREBASE_DEFAULT_DATABASE_ID.to_string()"]
pub database_id: String,
/// The maximum number of times to retry a failed operation. Defaults to `3`.
/// Retries are typically applied to transient errors.
#[default = "3"]
pub max_retries: usize,
/// An optional custom URL for the Firestore API.
/// If `None`, the default Google Firestore API endpoint is used.
/// This can be useful for targeting a Firestore emulator.
/// If the `FIRESTORE_EMULATOR_HOST` environment variable is set, it will
/// typically override this and the default URL.
pub firebase_api_url: Option<String>,
}
impl FirestoreDbOptions {
/// Attempts to create `FirestoreDbOptions` by detecting the Google Project ID
/// from the environment (e.g., Application Default Credentials or GCE metadata server).
///
/// If the project ID can be detected, it returns `Some(FirestoreDbOptions)` with
/// default values for other fields. Otherwise, it returns `None`.
///
/// # Examples
///
/// ```rust,no_run
/// # async fn run() {
/// use firestore::FirestoreDbOptions;
///
/// if let Some(options) = FirestoreDbOptions::for_default_project_id().await {
/// // Use options to create a FirestoreDb client
/// println!("Detected project ID: {}", options.google_project_id);
/// } else {
/// println!("Could not detect default project ID.");
/// }
/// # }
/// ```
pub async fn for_default_project_id() -> Option<FirestoreDbOptions> {
let google_project_id = GoogleEnvironment::detect_google_project_id().await;
google_project_id.map(FirestoreDbOptions::new)
}
}
/// The default database ID for Firestore, which is `"(default)"`.
pub const FIREBASE_DEFAULT_DATABASE_ID: &str = "(default)";
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/mod.rs | src/db/mod.rs | // Linter allowance for functions that might have many arguments,
// often seen in builder patterns or comprehensive configuration methods.
#![allow(clippy::too_many_arguments)]
/// Module for document retrieval operations (get).
mod get;
pub use get::*;
/// Module for document creation operations.
mod create;
pub use create::*;
/// Module for document update operations.
mod update;
pub use update::*;
/// Module for document deletion operations.
mod delete;
pub use delete::*;
/// Module defining models used in queries (filters, orders, etc.).
mod query_models;
pub use query_models::*;
/// Module defining models for preconditions (e.g., last update time).
mod precondition_models;
pub use precondition_models::*;
/// Module for query execution.
mod query;
pub use query::*;
/// Module for aggregated query execution.
mod aggregated_query;
pub use aggregated_query::*;
/// Module for listing documents or collections.
mod list;
pub use list::*;
/// Module for listening to real-time document changes.
mod listen_changes;
pub use listen_changes::*;
/// Module for storing the state of listen operations (e.g., resume tokens).
mod listen_changes_state_storage;
pub use listen_changes_state_storage::*;
use crate::*;
use gcloud_sdk::google::firestore::v1::firestore_client::FirestoreClient;
use gcloud_sdk::google::firestore::v1::*;
use gcloud_sdk::*;
// Re-export serde for convenience as it's often used with Firestore documents.
use serde::{Deserialize, Serialize};
use tracing::*;
/// Module for database client options and configuration.
mod options;
pub use options::*;
/// Module for Firestore transactions.
mod transaction;
pub use transaction::*;
/// Module defining models related to transactions.
mod transaction_models;
pub use transaction_models::*;
/// Internal module for transaction operations.
mod transaction_ops;
use transaction_ops::*;
/// Module for session-specific parameters (e.g., consistency, caching).
mod session_params;
pub use session_params::*;
/// Module for defining read consistency (e.g., read_time, transaction_id).
mod consistency_selector;
pub use consistency_selector::*;
/// Module for building parent paths for sub-collections.
mod parent_path_builder;
pub use parent_path_builder::*;
/// Module for batch writing operations.
mod batch_writer;
pub use batch_writer::*;
/// Module for streaming batch write operations.
mod batch_streaming_writer;
pub use batch_streaming_writer::*;
/// Module for simple (non-streaming) batch write operations.
mod batch_simple_writer;
pub use batch_simple_writer::*;
use crate::errors::{
FirestoreError, FirestoreInvalidParametersError, FirestoreInvalidParametersPublicDetails,
};
use std::fmt::Formatter;
use std::sync::Arc;
/// Module defining models for document transformations (e.g., server-side increments).
mod transform_models;
pub use transform_models::*;
/// Internal struct holding the core components of the Firestore database client.
/// This includes the database path, document path prefix, options, and the gRPC client.
struct FirestoreDbInner {
database_path: String,
doc_path: String,
options: FirestoreDbOptions,
client: GoogleApi<FirestoreClient<GoogleAuthMiddleware>>,
}
/// The main entry point for interacting with a Google Firestore database.
///
/// `FirestoreDb` provides methods for database operations such as creating, reading,
/// updating, and deleting documents, as well as querying collections and running transactions.
/// It manages the connection and authentication with the Firestore service.
///
/// Instances of `FirestoreDb` are cloneable and internally use `Arc` for shared state,
/// making them cheap to clone and safe to share across threads.
#[derive(Clone)]
pub struct FirestoreDb {
inner: Arc<FirestoreDbInner>,
session_params: Arc<FirestoreDbSessionParams>,
}
const GOOGLE_FIREBASE_API_URL: &str = "https://firestore.googleapis.com";
const GOOGLE_FIRESTORE_EMULATOR_HOST_ENV: &str = "FIRESTORE_EMULATOR_HOST";
impl FirestoreDb {
/// Creates a new `FirestoreDb` instance with the specified Google Project ID.
///
/// This is a convenience method that uses default [`FirestoreDbOptions`].
/// For more control over configuration, use [`FirestoreDb::with_options`].
///
/// # Arguments
/// * `google_project_id`: The Google Cloud Project ID that owns the Firestore database.
///
/// # Example
/// ```rust,no_run
/// use firestore::*; // Imports FirestoreDb, FirestoreResult, etc.
///
/// # async fn run() -> FirestoreResult<()> {
/// let db = FirestoreDb::new("my-gcp-project-id").await?;
/// // Use db for Firestore operations
/// # Ok(())
/// # }
/// ```
pub async fn new<S>(google_project_id: S) -> FirestoreResult<Self>
where
S: AsRef<str>,
{
Self::with_options(FirestoreDbOptions::new(
google_project_id.as_ref().to_string(),
))
.await
}
/// Creates a new `FirestoreDb` instance with the specified options.
///
/// This method allows for detailed configuration of the Firestore client,
/// such as setting a custom database ID or API URL.
/// It uses default token scopes and token source.
///
/// # Arguments
/// * `options`: The [`FirestoreDbOptions`] to configure the client.
pub async fn with_options(options: FirestoreDbOptions) -> FirestoreResult<Self> {
Self::with_options_token_source(
options,
GCP_DEFAULT_SCOPES.clone(),
TokenSourceType::Default,
)
.await
}
/// Creates a new `FirestoreDb` instance attempting to infer the Google Project ID
/// from the environment (e.g., Application Default Credentials).
///
/// This is useful in environments where the project ID is implicitly available.
///
/// # Errors
/// Returns an [`FirestoreError::InvalidParametersError`] if the project ID cannot be inferred.
pub async fn for_default_project_id() -> FirestoreResult<Self> {
match FirestoreDbOptions::for_default_project_id().await {
Some(options) => Self::with_options(options).await,
_ => Err(FirestoreError::InvalidParametersError(
FirestoreInvalidParametersError::new(FirestoreInvalidParametersPublicDetails::new(
"google_project_id".to_string(),
"Unable to retrieve google_project_id".to_string(),
)),
)),
}
}
/// Creates a new `FirestoreDb` instance with specified options and a service account key file
/// for authentication.
///
/// # Arguments
/// * `options`: The [`FirestoreDbOptions`] to configure the client.
/// * `service_account_key_path`: Path to the JSON service account key file.
pub async fn with_options_service_account_key_file(
options: FirestoreDbOptions,
service_account_key_path: std::path::PathBuf,
) -> FirestoreResult<Self> {
Self::with_options_token_source(
options,
gcloud_sdk::GCP_DEFAULT_SCOPES.clone(),
gcloud_sdk::TokenSourceType::File(service_account_key_path),
)
.await
}
/// Creates a new `FirestoreDb` instance with full control over options, token scopes,
/// and token source type.
///
/// This is the most flexible constructor, allowing customization of authentication
/// and authorization aspects.
///
/// # Arguments
/// * `options`: The [`FirestoreDbOptions`] to configure the client.
/// * `token_scopes`: A list of OAuth2 scopes required for Firestore access.
/// * `token_source_type`: The [`TokenSourceType`](gcloud_sdk::TokenSourceType)
/// specifying how to obtain authentication tokens (e.g., default, file, metadata server).
pub async fn with_options_token_source(
options: FirestoreDbOptions,
token_scopes: Vec<String>,
token_source_type: TokenSourceType,
) -> FirestoreResult<Self> {
let firestore_database_path = format!(
"projects/{}/databases/{}",
options.google_project_id, options.database_id
);
let firestore_database_doc_path = format!("{firestore_database_path}/documents");
let effective_firebase_api_url = options
.firebase_api_url
.clone()
.or_else(|| {
std::env::var(GOOGLE_FIRESTORE_EMULATOR_HOST_ENV)
.ok()
.map(ensure_url_scheme)
})
.unwrap_or_else(|| GOOGLE_FIREBASE_API_URL.to_string());
info!(
database_path = firestore_database_path,
api_url = effective_firebase_api_url,
token_scopes = token_scopes.join(", "),
"Creating a new database client.",
);
let client = GoogleApiClient::from_function_with_token_source(
FirestoreClient::new,
effective_firebase_api_url,
Some(firestore_database_path.clone()),
token_scopes,
token_source_type,
)
.await?;
let inner = FirestoreDbInner {
database_path: firestore_database_path,
doc_path: firestore_database_doc_path,
client,
options,
};
Ok(Self {
inner: Arc::new(inner),
session_params: Arc::new(FirestoreDbSessionParams::new()),
})
}
/// Deserializes a Firestore [`Document`] into a Rust type `T`.
///
/// This function uses the custom Serde deserializer provided by this crate
/// to map Firestore's native data types to Rust structs.
///
/// # Arguments
/// * `doc`: A reference to the Firestore [`Document`] to deserialize.
///
/// # Type Parameters
/// * `T`: The target Rust type that implements `serde::Deserialize`.
///
/// # Errors
/// Returns a [`FirestoreError::DeserializeError`] if deserialization fails.
pub fn deserialize_doc_to<T>(doc: &Document) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
{
crate::firestore_serde::firestore_document_to_serializable(doc)
}
/// Serializes a Rust type `T` into a Firestore [`Document`].
///
/// This function uses the custom Serde serializer to convert Rust structs
/// into Firestore's native data format.
///
/// # Arguments
/// * `document_path`: The full path to the document (e.g., "projects/my-project/databases/(default)/documents/my-collection/my-doc").
/// This is used to set the `name` field of the resulting Firestore document.
/// * `obj`: A reference to the Rust object to serialize.
///
/// # Type Parameters
/// * `S`: A type that can be converted into a string for the document path.
/// * `T`: The source Rust type that implements `serde::Serialize`.
///
/// # Errors
/// Returns a [`FirestoreError::SerializeError`] if serialization fails.
pub fn serialize_to_doc<S, T>(document_path: S, obj: &T) -> FirestoreResult<Document>
where
S: AsRef<str>,
T: Serialize,
{
crate::firestore_serde::firestore_document_from_serializable(document_path, obj)
}
/// Serializes a map of field names to [`FirestoreValue`]s into a Firestore [`Document`].
///
/// This is useful for constructing documents dynamically or when working with
/// partially structured data.
///
/// # Arguments
/// * `document_path`: The full path to the document.
/// * `fields`: An iterator yielding pairs of field names (as strings) and their
/// corresponding [`FirestoreValue`]s.
///
/// # Type Parameters
/// * `S`: A type that can be converted into a string for the document path.
/// * `I`: An iterator type for the fields.
/// * `IS`: A type that can be converted into a string for field names.
///
/// # Errors
/// Returns a [`FirestoreError::SerializeError`] if serialization fails.
pub fn serialize_map_to_doc<S, I, IS>(
document_path: S,
fields: I,
) -> FirestoreResult<FirestoreDocument>
where
S: AsRef<str>,
I: IntoIterator<Item = (IS, FirestoreValue)>,
IS: AsRef<str>,
{
crate::firestore_serde::firestore_document_from_map(document_path, fields)
}
/// Performs a simple "ping" to the Firestore database to check connectivity.
///
/// This method attempts to read a non-existent document. A successful outcome
/// (even if the document is not found) indicates that the database is reachable
/// and the client is authenticated.
///
/// # Errors
/// May return network or authentication errors if the database is unreachable.
pub async fn ping(&self) -> FirestoreResult<()> {
// Reading non-existing document just to check that database is available to read
self.get_doc_by_path(
"-ping-".to_string(), // A document ID that is unlikely to exist
self.get_database_path().clone(), // Use the root database path for this check
None, // No specific consistency required
0, // No retries needed for a ping
)
.await
.map(|_| ()) // If it's Ok(None) or Ok(Some(_)), it's a success for ping
.or_else(|err| {
// If the error is DataNotFoundError, it's still a successful ping.
// Other errors (network, auth) are real failures.
if matches!(err, FirestoreError::DataNotFoundError(_)) {
Ok(())
} else {
Err(err)
}
})
}
/// Returns the full database path string (e.g., "projects/my-project/databases/(default)").
#[inline]
pub fn get_database_path(&self) -> &String {
&self.inner.database_path
}
/// Returns the base path for documents within this database
/// (e.g., "projects/my-project/databases/(default)/documents").
#[inline]
pub fn get_documents_path(&self) -> &String {
&self.inner.doc_path
}
/// Constructs a [`ParentPathBuilder`] for creating paths to sub-collections
/// under a specified document.
///
/// # Arguments
/// * `collection_name`: The name of the collection containing the parent document.
/// * `document_id`: The ID of the parent document.
///
/// # Errors
/// Returns [`FirestoreError::InvalidParametersError`] if the `document_id` is invalid.
#[inline]
pub fn parent_path<S>(
&self,
collection_name: &str,
document_id: S,
) -> FirestoreResult<ParentPathBuilder>
where
S: AsRef<str>,
{
Ok(ParentPathBuilder::new(safe_document_path(
self.inner.doc_path.as_str(),
collection_name,
document_id.as_ref(),
)?))
}
/// Returns a reference to the [`FirestoreDbOptions`] used to configure this client.
#[inline]
pub fn get_options(&self) -> &FirestoreDbOptions {
&self.inner.options
}
/// Returns a reference to the current [`FirestoreDbSessionParams`] for this client instance.
/// Session parameters can control aspects like consistency and caching for operations
/// performed with this specific `FirestoreDb` instance.
#[inline]
pub fn get_session_params(&self) -> &FirestoreDbSessionParams {
&self.session_params
}
/// Returns a reference to the underlying gRPC client.
///
/// This provides access to the raw `FirestoreClient` from the `gcloud-sdk`
/// if direct interaction with the gRPC layer is needed.
#[inline]
pub fn client(&self) -> &GoogleApi<FirestoreClient<GoogleAuthMiddleware>> {
&self.inner.client
}
/// Clones the `FirestoreDb` instance, replacing its session parameters.
///
/// This is useful for creating a new client instance that shares the same
/// underlying connection and configuration but has different session-level
/// settings (e.g., for a specific transaction or consistency requirement).
///
/// # Arguments
/// * `session_params`: The new [`FirestoreDbSessionParams`] to use.
#[inline]
pub fn clone_with_session_params(&self, session_params: FirestoreDbSessionParams) -> Self {
Self {
session_params: session_params.into(),
..self.clone()
}
}
/// Consumes the `FirestoreDb` instance and returns a new one with replaced session parameters.
///
/// Similar to [`clone_with_session_params`](FirestoreDb::clone_with_session_params)
/// but takes ownership of `self`.
///
/// # Arguments
/// * `session_params`: The new [`FirestoreDbSessionParams`] to use.
#[inline]
pub fn with_session_params(self, session_params: FirestoreDbSessionParams) -> Self {
Self {
session_params: session_params.into(),
..self
}
}
/// Clones the `FirestoreDb` instance with a specific consistency selector.
///
/// This creates a new `FirestoreDb` instance configured to use the provided
/// [`FirestoreConsistencySelector`] for subsequent operations.
///
/// # Arguments
/// * `consistency_selector`: The consistency mode to apply (e.g., read at a specific time).
#[inline]
pub fn clone_with_consistency_selector(
&self,
consistency_selector: FirestoreConsistencySelector,
) -> Self {
let existing_session_params = (*self.session_params).clone();
self.clone_with_session_params(
existing_session_params.with_consistency_selector(consistency_selector),
)
}
/// Clones the `FirestoreDb` instance with a specific cache mode.
///
/// This method is only available if the `caching` feature is enabled.
///
/// # Arguments
/// * `cache_mode`: The [`FirestoreDbSessionCacheMode`] to apply.
#[cfg(feature = "caching")]
pub fn with_cache(&self, cache_mode: crate::FirestoreDbSessionCacheMode) -> Self {
let existing_session_params = (*self.session_params).clone();
self.clone_with_session_params(existing_session_params.with_cache_mode(cache_mode))
}
/// Clones the `FirestoreDb` instance to enable read-through caching with the provided cache.
///
/// Operations using the returned `FirestoreDb` instance will first attempt to read
/// from the cache. If data is not found, it will be fetched from Firestore and
/// then stored in the cache.
///
/// This method is only available if the `caching` feature is enabled.
///
/// # Arguments
/// * `cache`: A reference to the [`FirestoreCache`](crate::FirestoreCache) to use.
#[cfg(feature = "caching")]
pub fn read_through_cache<B, LS>(&self, cache: &FirestoreCache<B, LS>) -> Self
where
B: FirestoreCacheBackend + Send + Sync + 'static,
LS: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
self.with_cache(crate::FirestoreDbSessionCacheMode::ReadThroughCache(
cache.backend(),
))
}
/// Clones the `FirestoreDb` instance to read exclusively from the cache.
///
/// Operations using the returned `FirestoreDb` instance will only attempt to read
/// from the cache and will not fetch data from Firestore if it's not found in the cache.
///
/// This method is only available if the `caching` feature is enabled.
///
/// # Arguments
/// * `cache`: A reference to the [`FirestoreCache`](crate::FirestoreCache) to use.
#[cfg(feature = "caching")]
pub fn read_cached_only<B, LS>(&self, cache: &FirestoreCache<B, LS>) -> Self
where
B: FirestoreCacheBackend + Send + Sync + 'static,
LS: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
self.with_cache(crate::FirestoreDbSessionCacheMode::ReadCachedOnly(
cache.backend(),
))
}
}
/// Ensures that a URL string has a scheme (e.g., "http://").
/// If no scheme is present, "http://" is prepended.
fn ensure_url_scheme(url: String) -> String {
if !url.contains("://") {
format!("http://{url}")
} else {
url
}
}
impl std::fmt::Debug for FirestoreDb {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("FirestoreDb")
.field("options", &self.inner.options)
.field("database_path", &self.inner.database_path)
.field("doc_path", &self.inner.doc_path)
.finish()
}
}
pub(crate) fn safe_document_path<S>(
parent: &str,
collection_id: &str,
document_id: S,
) -> FirestoreResult<String>
where
S: AsRef<str>,
{
// All restrictions described here: https://firebase.google.com/docs/firestore/quotas#collections_documents_and_fields
// Here we check only the most dangerous one for `/` to avoid document_id injections, leaving other validation to the server side.
let document_id_ref = document_id.as_ref();
if document_id_ref.chars().all(|c| c != '/') && document_id_ref.len() <= 1500 {
Ok(format!("{parent}/{collection_id}/{document_id_ref}"))
} else {
Err(FirestoreError::InvalidParametersError(
FirestoreInvalidParametersError::new(FirestoreInvalidParametersPublicDetails::new(
"document_id".to_string(),
format!("Invalid document ID provided: {document_id_ref}"),
)),
))
}
}
pub(crate) fn split_document_path(path: &str) -> (&str, &str) {
// Return string range the last part after '/'
let split_pos = path.rfind('/').map(|pos| pos + 1).unwrap_or(0);
if split_pos == 0 {
("", path)
} else {
(&path[0..split_pos - 1], &path[split_pos..])
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Plain and special-character ids are accepted; ids containing `/` are rejected.
    #[test]
    fn test_safe_document_path() {
        let parent = "projects/test-project/databases/(default)/documents";
        assert_eq!(
            safe_document_path(parent, "test", "test1").ok(),
            Some("projects/test-project/databases/(default)/documents/test/test1".to_string())
        );
        assert_eq!(
            safe_document_path(parent, "test", "test1#test2").ok(),
            Some(
                "projects/test-project/databases/(default)/documents/test/test1#test2".to_string()
            )
        );
        assert_eq!(
            safe_document_path(parent, "test", "test1/test2").ok(),
            None
        );
    }

    /// Scheme-less URLs gain `http://`; an existing scheme is preserved.
    #[test]
    fn test_ensure_url_scheme() {
        let cases = [
            ("localhost:8080", "http://localhost:8080"),
            ("any://localhost:8080", "any://localhost:8080"),
            ("invalid:localhost:8080", "http://invalid:localhost:8080"),
        ];
        for (given, expected) in cases {
            assert_eq!(ensure_url_scheme(given.into()), expected);
        }
    }

    /// The last path segment is split off as the leaf.
    #[test]
    fn test_split_document_path() {
        assert_eq!(
            split_document_path("projects/test-project/databases/(default)/documents/test/test1"),
            (
                "projects/test-project/databases/(default)/documents/test",
                "test1"
            )
        );
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/transform_models.rs | src/db/transform_models.rs | use crate::errors::*;
use crate::FirestoreValue;
use crate::timestamp_utils::from_timestamp;
use chrono::prelude::*;
use rsb_derive::Builder;
/// The result of a Firestore write operation (create, update, delete).
///
/// This struct provides information about the outcome of a write, such as the
/// document's update time and the results of any field transformations that were applied.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreWriteResult {
    /// The time at which the document was last updated after this write operation.
    ///
    /// This is `None` if the write operation did not result in a changed document
    /// (e.g., deleting a non-existent document, or an update that didn't change any values).
    pub update_time: Option<DateTime<Utc>>,
    /// A list of values that are the result of applying field transformations.
    ///
    /// Each [`FirestoreValue`] in this list corresponds to a
    /// [`FirestoreFieldTransform`] applied in the write request.
    /// The order of results matches the order of transformations in the request.
    /// For example, if an `Increment` transform was used, this would contain the new
    /// value of the incremented field.
    pub transform_results: Vec<FirestoreValue>,
}
impl TryInto<FirestoreWriteResult> for gcloud_sdk::google::firestore::v1::WriteResult {
type Error = FirestoreError;
fn try_into(self) -> Result<FirestoreWriteResult, Self::Error> {
Ok(FirestoreWriteResult::new(
self.transform_results
.into_iter()
.map(FirestoreValue::from)
.collect(),
)
.opt_update_time(self.update_time.map(from_timestamp).transpose()?))
}
}
/// Represents a transformation to apply to a specific field within a document.
///
/// Field transformations allow for atomic, server-side modifications of document fields,
/// such as incrementing a number, setting a field to the server's timestamp, or
/// manipulating array elements.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreFieldTransform {
    /// The dot-separated path to the field to transform (e.g., "user.profile.age").
    pub field: String,
    /// The type of transformation to apply. See [`FirestoreFieldTransformType`].
    pub transform_type: FirestoreFieldTransformType,
}
/// Converts the crate's [`FirestoreFieldTransform`] into the protobuf
/// `FieldTransform`, propagating any error from the transform-type conversion.
impl TryInto<gcloud_sdk::google::firestore::v1::document_transform::FieldTransform>
    for FirestoreFieldTransform
{
    type Error = FirestoreError;

    fn try_into(
        self,
    ) -> Result<gcloud_sdk::google::firestore::v1::document_transform::FieldTransform, Self::Error>
    {
        // Convert the transform type first so `?` can short-circuit before constructing.
        let transform_type = self.transform_type.try_into()?;
        Ok(
            gcloud_sdk::google::firestore::v1::document_transform::FieldTransform {
                field_path: self.field,
                transform_type: Some(transform_type),
            },
        )
    }
}
/// Defines the specific type of transformation to apply to a field.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreFieldTransformType {
    /// Sets the field to a server-generated value, such as the request timestamp.
    SetToServerValue(FirestoreTransformServerValue),
    /// Atomically increments the field's numeric value by the given `FirestoreValue` (which must be an integer or double).
    Increment(FirestoreValue),
    /// Atomically sets the field to the maximum of its current value and the given `FirestoreValue`.
    Maximum(FirestoreValue),
    /// Atomically sets the field to the minimum of its current value and the given `FirestoreValue`.
    Minimum(FirestoreValue),
    /// Appends the given elements to an array field, but only if they are not already present.
    /// The `Vec<FirestoreValue>` contains the elements to append.
    AppendMissingElements(Vec<FirestoreValue>),
    /// Removes all instances of the given elements from an array field.
    /// The `Vec<FirestoreValue>` contains the elements to remove.
    RemoveAllFromArray(Vec<FirestoreValue>),
}
impl TryInto<gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType>
for FirestoreFieldTransformType
{
type Error = FirestoreError;
fn try_into(
self,
) -> Result<
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType,
Self::Error,
> {
Ok(match self {
FirestoreFieldTransformType::SetToServerValue(FirestoreTransformServerValue::Unspecified) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::SetToServerValue(
gcloud_sdk::google::firestore::v1::document_transform::field_transform::ServerValue::Unspecified as i32
)
},
FirestoreFieldTransformType::SetToServerValue(FirestoreTransformServerValue::RequestTime) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::SetToServerValue(
gcloud_sdk::google::firestore::v1::document_transform::field_transform::ServerValue::RequestTime as i32
)
},
FirestoreFieldTransformType::Increment(value) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::Increment(
value.value
)
},
FirestoreFieldTransformType::Maximum(value) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::Maximum(
value.value
)
},
FirestoreFieldTransformType::Minimum(value) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::Minimum(
value.value
)
},
FirestoreFieldTransformType::AppendMissingElements(value) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::AppendMissingElements(
gcloud_sdk::google::firestore::v1::ArrayValue {
values: value.into_iter().map( | s| s.value).collect()
}
)
},
FirestoreFieldTransformType::RemoveAllFromArray(value) => {
gcloud_sdk::google::firestore::v1::document_transform::field_transform::TransformType::RemoveAllFromArray(
gcloud_sdk::google::firestore::v1::ArrayValue {
values: value.into_iter().map( | s| s.value).collect()
}
)
},
})
}
}
/// Specifies a server-generated value for a field transformation.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FirestoreTransformServerValue {
    /// The server value is unspecified. This typically should not be used directly.
    Unspecified,
    /// Sets the field to the timestamp of when the server processes the request.
    /// This is commonly used for `createdAt` or `updatedAt` fields.
    RequestTime,
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/query.rs | src/db/query.rs | use crate::*;
use async_trait::async_trait;
use chrono::prelude::*;
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::FutureExt;
use futures::TryFutureExt;
use futures::TryStreamExt;
use futures::{future, StreamExt};
use gcloud_sdk::google::firestore::v1::*;
use rand::Rng;
use serde::Deserialize;
use tokio::sync::mpsc;
use tracing::*;
/// A boxed stream whose next item can be inspected without consuming it
/// (used for looking ahead at partition cursors).
pub type PeekableBoxStream<'a, T> = futures::stream::Peekable<BoxStream<'a, T>>;
/// Query operations on a Firestore database: one-shot collection queries,
/// streaming variants (with or without per-item errors/metadata), and
/// partitioned queries for parallel consumption of large result sets.
#[async_trait]
pub trait FirestoreQuerySupport {
    /// Runs a query and collects all matching documents into a vector.
    async fn query_doc(&self, params: FirestoreQueryParams) -> FirestoreResult<Vec<Document>>;
    /// Streams matching documents; per-item errors are logged and dropped.
    async fn stream_query_doc<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>>;
    /// Streams matching documents, surfacing per-item errors to the caller.
    async fn stream_query_doc_with_errors<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>>;
    /// Streams raw query responses including server metadata alongside documents.
    async fn stream_query_doc_with_metadata<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<FirestoreDocument>>>>;
    /// Runs a query and deserializes all matching documents into `T`.
    async fn query_obj<T>(&self, params: FirestoreQueryParams) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>;
    /// Streams deserialized objects; deserialization errors are logged and dropped.
    async fn stream_query_obj<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b;
    /// Streams deserialized objects, surfacing per-item errors to the caller.
    async fn stream_query_obj_with_errors<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b;
    /// Streams deserialized objects together with server metadata.
    async fn stream_query_obj_with_metadata<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<T>>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b;
    /// Streams the partition cursors the server computed for a partitioned query.
    fn stream_partition_cursors_with_errors(
        &self,
        params: FirestorePartitionQueryParams,
    ) -> BoxFuture<'_, FirestoreResult<PeekableBoxStream<'_, FirestoreResult<FirestoreQueryCursor>>>>;
    /// Runs a partitioned query, fetching up to `parallelism` partitions concurrently.
    async fn stream_partition_query_doc_with_errors(
        &self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<'_, FirestoreResult<(FirestorePartition, Document)>>>;
    /// Partitioned query variant that deserializes each document into `T`.
    async fn stream_partition_query_obj_with_errors<'a, T>(
        &'a self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(FirestorePartition, T)>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'a;
}
impl FirestoreDb {
    /// Builds the gRPC `RunQueryRequest` for the given query parameters,
    /// attaching the session's consistency selector and optional explain options.
    fn create_query_request(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<gcloud_sdk::tonic::Request<RunQueryRequest>> {
        Ok(gcloud_sdk::tonic::Request::new(RunQueryRequest {
            // Default to the database's root documents path when no explicit parent is set.
            parent: params
                .parent
                .as_ref()
                .unwrap_or_else(|| self.get_documents_path())
                .clone(),
            consistency_selector: self
                .session_params
                .consistency_selector
                .as_ref()
                .map(|selector| selector.try_into())
                .transpose()?,
            explain_options: params
                .explain_options
                .as_ref()
                .map(|eo| eo.try_into())
                .transpose()?,
            query_type: Some(run_query_request::QueryType::StructuredQuery(
                params.try_into()?,
            )),
        }))
    }

    /// Opens the query stream, retrying retriable database errors up to
    /// `max_retries` with exponential backoff and jitter. Records the time
    /// taken to open the stream into the given tracing `span`.
    fn stream_query_doc_with_retries<'b>(
        &self,
        params: FirestoreQueryParams,
        retries: usize,
        span: Span,
    ) -> BoxFuture<
        '_,
        FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<Document>>>>,
    > {
        async move {
            let query_request = self.create_query_request(params.clone())?;
            let begin_query_utc: DateTime<Utc> = Utc::now();

            match self
                .client()
                .get()
                .run_query(query_request)
                .map_err(|e| e.into())
                .await
            {
                Ok(query_response) => {
                    // Convert each raw response item into FirestoreWithMetadata<Document>.
                    let query_stream = query_response
                        .into_inner()
                        .map_err(|e| e.into())
                        .map(|r| r.and_then(|r| r.try_into()))
                        .boxed();

                    let end_query_utc: DateTime<Utc> = Utc::now();
                    let query_duration = end_query_utc.signed_duration_since(begin_query_utc);

                    span.record(
                        "/firestore/response_time",
                        query_duration.num_milliseconds(),
                    );
                    span.in_scope(|| {
                        debug!(
                            collection_id = ?params.collection_id,
                            duration_milliseconds = query_duration.num_milliseconds(),
                            "Queried stream of documents.",
                        );
                    });

                    Ok(query_stream)
                }
                Err(err) => match err {
                    // Retry only when the server marked the error retriable and the
                    // retry budget is not yet exhausted.
                    FirestoreError::DatabaseError(ref db_err)
                        if db_err.retry_possible && retries < self.inner.options.max_retries =>
                    {
                        // Full-jitter exponential backoff: uniform in 0..=2^retries seconds.
                        let sleep_duration = tokio::time::Duration::from_millis(
                            rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
                        );

                        warn!(
                            err = %db_err,
                            current_retry = retries + 1,
                            max_retries = self.inner.options.max_retries,
                            delay = sleep_duration.as_millis(),
                            "Failed to stream query. Retrying up to the specified number of times."
                        );

                        tokio::time::sleep(sleep_duration).await;

                        self.stream_query_doc_with_retries(params, retries + 1, span)
                            .await
                    }
                    _ => Err(err),
                },
            }
        }
        .boxed()
    }

    /// Attempts to serve the query from the local cache. Only available for
    /// single-collection queries when the session is in `ReadCachedOnly` mode;
    /// collection-group queries always skip the cache.
    #[cfg(feature = "caching")]
    #[inline]
    async fn query_docs_from_cache<'b>(
        &self,
        params: &FirestoreQueryParams,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>
    {
        match &params.collection_id {
            FirestoreQueryCollection::Group(_) => Ok(FirestoreCachedValue::SkipCache),
            FirestoreQueryCollection::Single(collection_id) => {
                if let FirestoreDbSessionCacheMode::ReadCachedOnly(ref cache) =
                    self.session_params.cache_mode
                {
                    let span = span!(
                        Level::DEBUG,
                        "Firestore Query Cached",
                        "/firestore/collection_name" = collection_id.as_str(),
                        "/firestore/cache_result" = field::Empty,
                        "/firestore/response_time" = field::Empty
                    );
                    let begin_query_utc: DateTime<Utc> = Utc::now();

                    // Resolve the collection path under the explicit parent if given.
                    let collection_path = if let Some(parent) = params.parent.as_ref() {
                        format!("{}/{}", parent, collection_id)
                    } else {
                        format!("{}/{}", self.get_documents_path(), collection_id.as_str())
                    };

                    let result = cache.query_docs(&collection_path, params).await?;
                    let end_query_utc: DateTime<Utc> = Utc::now();
                    let query_duration = end_query_utc.signed_duration_since(begin_query_utc);

                    span.record(
                        "/firestore/response_time",
                        query_duration.num_milliseconds(),
                    );

                    match result {
                        FirestoreCachedValue::UseCached(stream) => {
                            span.record("/firestore/cache_result", "hit");
                            span.in_scope(|| {
                                debug!(collection_id, "Querying documents from cache.");
                            });
                            Ok(FirestoreCachedValue::UseCached(stream))
                        }
                        FirestoreCachedValue::SkipCache => {
                            span.record("/firestore/cache_result", "miss");
                            // In ReadCachedOnly mode a cache miss must NOT fall through to the
                            // server: return an empty stream instead.
                            if matches!(
                                self.session_params.cache_mode,
                                FirestoreDbSessionCacheMode::ReadCachedOnly(_)
                            ) {
                                span.in_scope(|| {
                                    debug!(collection_id,
                                        "Cache doesn't have suitable documents, but cache mode is ReadCachedOnly so returning empty stream.",
                                    );
                                });
                                Ok(FirestoreCachedValue::UseCached(Box::pin(
                                    futures::stream::empty(),
                                )))
                            } else {
                                span.in_scope(|| {
                                    debug!(
                                        collection_id,
                                        "Querying documents from cache skipped.",
                                    );
                                });
                                Ok(FirestoreCachedValue::SkipCache)
                            }
                        }
                    }
                } else {
                    Ok(FirestoreCachedValue::SkipCache)
                }
            }
        }
    }
}
#[async_trait]
impl FirestoreQuerySupport for FirestoreDb {
    async fn query_doc(&self, params: FirestoreQueryParams) -> FirestoreResult<Vec<Document>> {
        // Collect the error-aware stream, failing fast on the first error.
        let doc_stream = self.stream_query_doc_with_errors(params).await?;
        Ok(doc_stream.try_collect::<Vec<Document>>().await?)
    }

    async fn stream_query_doc<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>> {
        let doc_stream = self.stream_query_doc_with_errors(params).await?;
        // Per-item errors are logged and dropped so the caller sees only documents.
        Ok(Box::pin(doc_stream.filter_map(|doc_res| {
            future::ready(match doc_res {
                Ok(doc) => Some(doc),
                Err(err) => {
                    error!(%err, "Error occurred while consuming query.");
                    None
                }
            })
        })))
    }

    async fn stream_query_doc_with_errors<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        // Prefer the local cache when the session is in a cached read mode.
        #[cfg(feature = "caching")]
        {
            if let FirestoreCachedValue::UseCached(stream) =
                self.query_docs_from_cache(&params).await?
            {
                return Ok(stream);
            }
        }

        let collection_str = params.collection_id.to_string();

        let span = span!(
            Level::DEBUG,
            "Firestore Streaming Query",
            "/firestore/collection_name" = collection_str.as_str(),
            "/firestore/response_time" = field::Empty
        );

        let doc_stream = self.stream_query_doc_with_retries(params, 0, span).await?;

        // Server responses without a document (progress/metadata items) are skipped.
        Ok(Box::pin(doc_stream.filter_map(|doc_res| {
            future::ready(match doc_res {
                Ok(resp) => resp.document.map(Ok),
                Err(err) => {
                    error!(%err, "Error occurred while consuming query.");
                    Some(Err(err))
                }
            })
        })))
    }

    async fn stream_query_doc_with_metadata<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<Document>>>> {
        let collection_str = params.collection_id.to_string();

        let span = span!(
            Level::DEBUG,
            "Firestore Streaming Query with Metadata",
            "/firestore/collection_name" = collection_str.as_str(),
            "/firestore/response_time" = field::Empty
        );

        self.stream_query_doc_with_retries(params, 0, span).await
    }

    async fn query_obj<T>(&self, params: FirestoreQueryParams) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        let doc_vec = self.query_doc(params).await?;
        // Deserialization short-circuits on the first failing document.
        doc_vec
            .iter()
            .map(|doc| Self::deserialize_doc_to(doc))
            .collect()
    }

    async fn stream_query_obj<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>,
        T: 'b,
    {
        let doc_stream = self.stream_query_doc(params).await?;
        // Deserialization failures are logged (with the document name) and dropped.
        Ok(Box::pin(doc_stream.filter_map(|doc| async move {
            match Self::deserialize_doc_to::<T>(&doc) {
                Ok(obj) => Some(obj),
                Err(err) => {
                    error!(
                        %err,
                        "Error occurred while converting query document in a stream. Document: {}",
                        doc.name
                    );
                    None
                }
            }
        })))
    }

    async fn stream_query_obj_with_errors<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        let doc_stream = self.stream_query_doc_with_errors(params).await?;
        Ok(Box::pin(doc_stream.and_then(|doc| {
            future::ready(Self::deserialize_doc_to::<T>(&doc))
        })))
    }

    async fn stream_query_obj_with_metadata<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<T>>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        let res_stream = self.stream_query_doc_with_metadata(params).await?;
        // Deserialize the document part in-place, keeping the metadata untouched.
        Ok(Box::pin(res_stream.map(|res| {
            res.and_then(|with_meta| {
                Ok(FirestoreWithMetadata {
                    document: with_meta
                        .document
                        .map(|document| Self::deserialize_doc_to::<T>(&document))
                        .transpose()?,
                    metadata: with_meta.metadata,
                })
            })
        })))
    }

    fn stream_partition_cursors_with_errors(
        &self,
        params: FirestorePartitionQueryParams,
    ) -> BoxFuture<'_, FirestoreResult<PeekableBoxStream<'_, FirestoreResult<FirestoreQueryCursor>>>>
    {
        Box::pin(async move {
            let consistency_selector: Option<
                gcloud_sdk::google::firestore::v1::partition_query_request::ConsistencySelector,
            > = self
                .session_params
                .consistency_selector
                .as_ref()
                .map(|selector| selector.try_into())
                .transpose()?;

            // Unfold pages of cursors: the state is Some(params) while more pages
            // remain (next_page_token non-empty) and None once exhausted or failed.
            let stream: PeekableBoxStream<FirestoreResult<FirestoreQueryCursor>> =
                futures::stream::unfold(
                    Some((params, consistency_selector)),
                    move |maybe_params| async move {
                        if let Some((params, maybe_consistency_selector)) = maybe_params {
                            match params.query_params.clone().try_into() {
                                Ok(query_params) => {
                                    let request =
                                        gcloud_sdk::tonic::Request::new(PartitionQueryRequest {
                                            page_size: params.page_size as i32,
                                            partition_count: params.partition_count as i64,
                                            parent: params
                                                .query_params
                                                .parent
                                                .as_ref()
                                                .unwrap_or_else(|| self.get_documents_path())
                                                .clone(),
                                            consistency_selector: maybe_consistency_selector,
                                            query_type: Some(
                                                partition_query_request::QueryType::StructuredQuery(
                                                    query_params,
                                                ),
                                            ),
                                            page_token: params
                                                .page_token
                                                .clone()
                                                .unwrap_or_default(),
                                        });
                                    match self.client().get().partition_query(request).await {
                                        Ok(response) => {
                                            let partition_response = response.into_inner();
                                            let firestore_cursors: Vec<FirestoreQueryCursor> =
                                                partition_response
                                                    .partitions
                                                    .into_iter()
                                                    .map(|e| e.into())
                                                    .collect();
                                            if !partition_response.next_page_token.is_empty() {
                                                Some((
                                                    Ok(firestore_cursors),
                                                    Some((
                                                        params.with_page_token(
                                                            partition_response.next_page_token,
                                                        ),
                                                        maybe_consistency_selector,
                                                    )),
                                                ))
                                            } else {
                                                Some((Ok(firestore_cursors), None))
                                            }
                                        }
                                        // Errors terminate pagination after being emitted once.
                                        Err(err) => Some((Err(FirestoreError::from(err)), None)),
                                    }
                                }
                                Err(err) => Some((Err(err), None)),
                            }
                        } else {
                            None
                        }
                    },
                )
                // Flatten each page (Vec of cursors) into individual stream items.
                .flat_map(|s| {
                    futures::stream::iter(match s {
                        Ok(results) => results
                            .into_iter()
                            .map(Ok::<FirestoreQueryCursor, FirestoreError>)
                            .collect(),
                        Err(err) => vec![Err(err)],
                    })
                })
                .boxed()
                .peekable();
            Ok(stream)
        })
    }

    async fn stream_partition_query_doc_with_errors(
        &self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<'_, FirestoreResult<(FirestorePartition, Document)>>> {
        let collection_str = partition_params.query_params.collection_id.to_string();

        let span = span!(
            Level::DEBUG,
            "Firestore Streaming Partition Query",
            "/firestore/collection_name" = collection_str
        );

        span.in_scope(|| {
            debug!(
                parallelism,
                "Running query on partitions with specified max parallelism.",
            )
        });

        let mut cursors: Vec<FirestoreQueryCursor> = self
            .stream_partition_cursors_with_errors(partition_params.clone())
            .await?
            .try_collect()
            .await?;

        if cursors.is_empty() {
            // No cursors means the server considered the result set too small
            // to partition; run it as one ordinary query.
            span.in_scope(|| {
                debug!(
                    "The server detected the query has too few results to be partitioned. Falling back to normal query."
                )
            });
            let doc_stream = self
                .stream_query_doc_with_errors(partition_params.query_params)
                .await?;
            Ok(doc_stream
                .and_then(|doc| future::ready(Ok((FirestorePartition::new(), doc))))
                .boxed())
        } else {
            // Bracket the cursor list with `None` on both ends so that `windows(2)`
            // yields (start, end) pairs covering the open-ended first and last ranges.
            let mut cursors_pairs: Vec<Option<FirestoreQueryCursor>> =
                Vec::with_capacity(cursors.len() + 2);
            cursors_pairs.push(None);
            cursors_pairs.extend(cursors.drain(..).map(Some));
            cursors_pairs.push(None);

            // Results are funneled through an unbounded channel; the stream ends
            // once every sender clone has been dropped.
            let (tx, rx) =
                mpsc::unbounded_channel::<FirestoreResult<(FirestorePartition, Document)>>();

            futures::stream::iter(cursors_pairs.windows(2))
                .map(|cursor_pair| {
                    (
                        cursor_pair,
                        tx.clone(),
                        partition_params.clone(),
                        span.clone(),
                    )
                })
                .for_each_concurrent(
                    Some(parallelism),
                    |(cursor_pair, tx, partition_params, span)| async move {
                        span.in_scope(|| debug!(?cursor_pair, "Streaming partition cursor."));
                        // Constrain the base query to this partition's cursor range.
                        let mut params_with_cursors = partition_params.query_params;
                        if let Some(first_cursor) = cursor_pair.first() {
                            params_with_cursors.mopt_start_at(first_cursor.clone());
                        }
                        if let Some(last_cursor) = cursor_pair.last() {
                            params_with_cursors.mopt_end_at(last_cursor.clone());
                        }

                        let partition = FirestorePartition::new()
                            .opt_start_at(params_with_cursors.start_at.clone())
                            .opt_end_at(params_with_cursors.end_at.clone());

                        match self.stream_query_doc_with_errors(params_with_cursors).await {
                            Ok(result_stream) => {
                                result_stream
                                    .map(|doc_res| {
                                        (doc_res, tx.clone(), span.clone(), partition.clone())
                                    })
                                    .for_each(|(doc_res, tx, span, partition)| async move {
                                        let message = doc_res.map(|doc| (partition.clone(), doc));
                                        // Send failures mean the receiver is gone; log and continue.
                                        if let Err(err) = tx.send(message) {
                                            span.in_scope(|| {
                                                warn!(
                                                    %err,
                                                    ?partition,
                                                    "Unable to send result for partition.",
                                                )
                                            })
                                        };
                                    })
                                    .await;
                            }
                            Err(err) => {
                                if let Err(err) = tx.send(Err(err)) {
                                    span.in_scope(|| {
                                        warn!(
                                            ?err,
                                            ?cursor_pair,
                                            "Unable to send result for partition cursor.",
                                        )
                                    })
                                };
                            }
                        }
                    },
                )
                .await;

            Ok(Box::pin(
                tokio_stream::wrappers::UnboundedReceiverStream::new(rx),
            ))
        }
    }

    async fn stream_partition_query_obj_with_errors<'a, T>(
        &'a self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(FirestorePartition, T)>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'a,
    {
        let doc_stream = self
            .stream_partition_query_doc_with_errors(parallelism, partition_params)
            .await?;

        Ok(Box::pin(doc_stream.and_then(|(partition, doc)| {
            future::ready(Self::deserialize_doc_to::<T>(&doc).map(|obj| (partition, obj)))
        })))
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/aggregated_query.rs | src/db/aggregated_query.rs | #![allow(clippy::derive_partial_eq_without_eq)] // Since we may not be able to implement Eq for the changes coming from Firestore protos
use crate::{FirestoreDb, FirestoreError, FirestoreQueryParams, FirestoreResult};
use async_trait::async_trait;
use chrono::prelude::*;
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::FutureExt;
use futures::TryFutureExt;
use futures::TryStreamExt;
use futures::{future, StreamExt};
use gcloud_sdk::google::firestore::v1::*;
use rand::Rng;
use rsb_derive::*;
use serde::Deserialize;
use tracing::*;
/// Parameters for an aggregation query: the underlying structured query plus
/// the list of aggregations to compute over its result set.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreAggregatedQueryParams {
    /// The structured query whose matching documents are aggregated.
    pub query_params: FirestoreQueryParams,
    /// The aggregations to compute; each result is keyed by its alias.
    pub aggregations: Vec<FirestoreAggregation>,
}
/// A single aggregation to compute, identified in the result by `alias`.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreAggregation {
    /// The field name under which this aggregation's result is returned.
    pub alias: String,
    /// The aggregation operator to apply; `None` leaves it unspecified.
    pub operator: Option<FirestoreAggregationOperator>,
}
/// Converts the crate's [`FirestoreAggregation`] into the protobuf `Aggregation`.
impl From<&FirestoreAggregation> for structured_aggregation_query::Aggregation {
    fn from(aggregation: &FirestoreAggregation) -> Self {
        // The optional operator is mapped through its own From conversion.
        let operator = aggregation.operator.as_ref().map(Into::into);
        structured_aggregation_query::Aggregation {
            alias: aggregation.alias.clone(),
            operator,
        }
    }
}
/// The supported aggregation operators.
#[derive(Debug, PartialEq, Clone)]
pub enum FirestoreAggregationOperator {
    /// Counts matching documents, optionally up to a limit.
    Count(FirestoreAggregationOperatorCount),
    /// Sums a numeric field across matching documents.
    Sum(FirestoreAggregationOperatorSum),
    /// Averages a numeric field across matching documents.
    Avg(FirestoreAggregationOperatorAvg),
}
/// Converts the crate's operator enum into the protobuf `Operator` oneof.
impl From<&FirestoreAggregationOperator> for structured_aggregation_query::aggregation::Operator {
    fn from(op: &FirestoreAggregationOperator) -> Self {
        use structured_aggregation_query::aggregation::Operator;
        match op {
            FirestoreAggregationOperator::Count(opts) => Operator::Count(opts.into()),
            FirestoreAggregationOperator::Sum(opts) => Operator::Sum(opts.into()),
            FirestoreAggregationOperator::Avg(opts) => Operator::Avg(opts.into()),
        }
    }
}
/// Options for the `count` aggregation.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreAggregationOperatorCount {
    /// Optional upper bound at which the server stops counting.
    pub up_to: Option<usize>,
}
/// Converts the count options into the protobuf `Count` message.
impl From<&FirestoreAggregationOperatorCount> for structured_aggregation_query::aggregation::Count {
    fn from(cnt: &FirestoreAggregationOperatorCount) -> Self {
        // The proto carries the optional limit as an i64.
        let up_to = cnt.up_to.map(|v| v as i64);
        structured_aggregation_query::aggregation::Count { up_to }
    }
}
/// Options for the `sum` aggregation.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreAggregationOperatorSum {
    /// The dot-separated path of the numeric field to sum.
    pub field_name: String,
}
/// Converts the sum options into the protobuf `Sum` message.
impl From<&FirestoreAggregationOperatorSum> for structured_aggregation_query::aggregation::Sum {
    fn from(operator: &FirestoreAggregationOperatorSum) -> Self {
        let field = structured_query::FieldReference {
            field_path: operator.field_name.clone(),
        };
        structured_aggregation_query::aggregation::Sum { field: Some(field) }
    }
}
/// Options for the `avg` aggregation.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreAggregationOperatorAvg {
    /// The dot-separated path of the numeric field to average.
    pub field_name: String,
}
/// Converts the avg options into the protobuf `Avg` message.
impl From<&FirestoreAggregationOperatorAvg> for structured_aggregation_query::aggregation::Avg {
    fn from(operator: &FirestoreAggregationOperatorAvg) -> Self {
        let field = structured_query::FieldReference {
            field_path: operator.field_name.clone(),
        };
        structured_aggregation_query::aggregation::Avg { field: Some(field) }
    }
}
/// Aggregation query operations (count/sum/avg) on a Firestore database,
/// in both collected and streaming flavours. Aggregation results are
/// returned as synthetic documents whose fields are keyed by alias.
#[async_trait]
pub trait FirestoreAggregatedQuerySupport {
    /// Runs an aggregation query and collects all result documents.
    async fn aggregated_query_doc(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<Document>>;
    /// Streams aggregation result documents; errors are logged and dropped.
    async fn stream_aggregated_query_doc<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>>;
    /// Streams aggregation result documents, surfacing per-item errors.
    async fn stream_aggregated_query_doc_with_errors<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>>;
    /// Runs an aggregation query and deserializes the results into `T`.
    async fn aggregated_query_obj<T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>;
    /// Streams deserialized aggregation results; errors are logged and dropped.
    async fn stream_aggregated_query_obj<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>;
    /// Streams deserialized aggregation results, surfacing per-item errors.
    async fn stream_aggregated_query_obj_with_errors<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b;
}
#[async_trait]
impl FirestoreAggregatedQuerySupport for FirestoreDb {
    async fn aggregated_query_doc(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<Document>> {
        let collection_str = params.query_params.collection_id.to_string();
        let span = span!(
            Level::DEBUG,
            "Firestore Aggregated Query",
            "/firestore/collection_name" = collection_str.as_str(),
            "/firestore/response_time" = field::Empty
        );
        self.aggregated_query_doc_with_retries(params, 0, &span)
            .await
    }

    async fn stream_aggregated_query_doc<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>> {
        let collection_str = params.query_params.collection_id.to_string();
        let span = span!(
            Level::DEBUG,
            "Firestore Streaming Aggregated Query",
            "/firestore/collection_name" = collection_str.as_str(),
            "/firestore/response_time" = field::Empty
        );
        let doc_stream = self
            .stream_aggregated_query_doc_with_retries(params, 0, &span)
            .await?;
        // Empty response items and per-item errors are dropped; errors are logged.
        Ok(Box::pin(doc_stream.filter_map(|doc_res| {
            future::ready(match doc_res {
                Ok(Some(doc)) => Some(doc),
                Ok(None) => None,
                Err(err) => {
                    error!(%err, "Error occurred while consuming query.");
                    None
                }
            })
        })))
    }

    async fn stream_aggregated_query_doc_with_errors<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        let collection_str = params.query_params.collection_id.to_string();
        let span = span!(
            Level::DEBUG,
            "Firestore Streaming Aggregated Query",
            "/firestore/collection_name" = collection_str.as_str(),
            "/firestore/response_time" = field::Empty
        );
        let doc_stream = self
            .stream_aggregated_query_doc_with_retries(params, 0, &span)
            .await?;
        // Empty response items are skipped; errors are surfaced to the caller.
        Ok(Box::pin(doc_stream.filter_map(|doc_res| {
            future::ready(match doc_res {
                Ok(Some(doc)) => Some(Ok(doc)),
                Ok(None) => None,
                Err(err) => {
                    error!(%err, "Error occurred while consuming query.");
                    Some(Err(err))
                }
            })
        })))
    }

    async fn aggregated_query_obj<T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        let doc_vec = self.aggregated_query_doc(params).await?;
        // Deserialization short-circuits on the first failing document.
        doc_vec
            .iter()
            .map(|doc| Self::deserialize_doc_to(doc))
            .collect()
    }

    async fn stream_aggregated_query_obj<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        let doc_stream = self.stream_aggregated_query_doc(params).await?;
        // Deserialization failures are logged and dropped.
        Ok(Box::pin(doc_stream.filter_map(|doc| async move {
            match Self::deserialize_doc_to::<T>(&doc) {
                Ok(obj) => Some(obj),
                Err(err) => {
                    error!(
                        %err,
                        "Error occurred while consuming query document as a stream.",
                    );
                    None
                }
            }
        })))
    }

    async fn stream_aggregated_query_obj_with_errors<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        let doc_stream = self.stream_aggregated_query_doc_with_errors(params).await?;
        Ok(Box::pin(doc_stream.and_then(|doc| {
            future::ready(Self::deserialize_doc_to::<T>(&doc))
        })))
    }
}
impl FirestoreDb {
    /// Builds the gRPC `RunAggregationQueryRequest`, wrapping the structured
    /// query with the requested aggregations and attaching the session's
    /// consistency selector.
    fn create_aggregated_query_request(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<gcloud_sdk::tonic::Request<RunAggregationQueryRequest>> {
        Ok(gcloud_sdk::tonic::Request::new(RunAggregationQueryRequest {
            // Default to the database's root documents path when no explicit parent is set.
            parent: params
                .query_params
                .parent
                .as_ref()
                .unwrap_or_else(|| self.get_documents_path())
                .clone(),
            consistency_selector: self
                .session_params
                .consistency_selector
                .as_ref()
                .map(|selector| selector.try_into())
                .transpose()?,
            query_type: Some(run_aggregation_query_request::QueryType::StructuredAggregationQuery(
                StructuredAggregationQuery {
                    aggregations: params.aggregations.iter().map(|agg| agg.into()).collect(),
                    query_type: Some(gcloud_sdk::google::firestore::v1::structured_aggregation_query::QueryType::StructuredQuery(params.query_params.try_into()?)),
                }
            )),
            explain_options: None,
        }))
    }

    /// Opens the aggregation query stream, retrying retriable database errors
    /// up to `max_retries` with exponential backoff and jitter. Each stream
    /// item is `None` when the server response carried no result document.
    fn stream_aggregated_query_doc_with_retries<'a, 'b>(
        &'a self,
        params: FirestoreAggregatedQueryParams,
        retries: usize,
        span: &'a Span,
    ) -> BoxFuture<'a, FirestoreResult<BoxStream<'b, FirestoreResult<Option<Document>>>>> {
        async move {
            let query_request = self.create_aggregated_query_request(params.clone())?;
            let begin_query_utc: DateTime<Utc> = Utc::now();

            match self
                .client()
                .get()
                .run_aggregation_query(query_request)
                .map_err(|e| e.into())
                .await
            {
                Ok(query_response) => {
                    // Wrap each aggregation response as a synthetic Document.
                    let query_stream = query_response
                        .into_inner()
                        .map_ok(Self::aggregated_response_to_doc)
                        .map_err(|e| e.into())
                        .boxed();

                    let end_query_utc: DateTime<Utc> = Utc::now();
                    let query_duration = end_query_utc.signed_duration_since(begin_query_utc);

                    span.record(
                        "/firestore/response_time",
                        query_duration.num_milliseconds(),
                    );
                    span.in_scope(|| {
                        debug!(
                            collection_id = ?params.query_params.collection_id,
                            duration_milliseconds = query_duration.num_milliseconds(),
                            "Querying stream of documents in specified collection.",
                        );
                    });

                    Ok(query_stream)
                }
                Err(err) => match err {
                    // Retry only retriable errors within the configured budget.
                    FirestoreError::DatabaseError(ref db_err)
                        if db_err.retry_possible && retries < self.inner.options.max_retries =>
                    {
                        // Full-jitter exponential backoff: uniform in 0..=2^retries seconds.
                        let sleep_duration = tokio::time::Duration::from_millis(
                            rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
                        );
                        warn!(
                            err = %db_err,
                            current_retry = retries + 1,
                            max_retries = self.inner.options.max_retries,
                            delay = sleep_duration.as_millis(),
                            "Failed to run aggregation query. Retrying up to the specified number of times.",
                        );

                        tokio::time::sleep(sleep_duration).await;
                        self.stream_aggregated_query_doc_with_retries(params, retries + 1, span)
                            .await
                    }
                    _ => Err(err),
                },
            }
        }
        .boxed()
    }

    /// Runs the aggregation query to completion and collects the result
    /// documents, with the same retry policy as the streaming variant.
    fn aggregated_query_doc_with_retries<'a>(
        &'a self,
        params: FirestoreAggregatedQueryParams,
        retries: usize,
        span: &'a Span,
    ) -> BoxFuture<'a, FirestoreResult<Vec<Document>>> {
        async move {
            let query_request = self.create_aggregated_query_request(params.clone())?;
            let begin_query_utc: DateTime<Utc> = Utc::now();

            match self
                .client()
                .get()
                .run_aggregation_query(query_request)
                .map_err(|e| e.into())
                .await
            {
                Ok(query_response) => {
                    // Collect all responses, dropping the ones without a result document.
                    let query_stream = query_response
                        .into_inner()
                        .map_ok(Self::aggregated_response_to_doc)
                        .try_collect::<Vec<Option<Document>>>()
                        .await?
                        .into_iter()
                        .flatten()
                        .collect();

                    let end_query_utc: DateTime<Utc> = Utc::now();
                    let query_duration = end_query_utc.signed_duration_since(begin_query_utc);

                    span.record(
                        "/firestore/response_time",
                        query_duration.num_milliseconds(),
                    );
                    span.in_scope(|| {
                        debug!(
                            collection_id = ?params.query_params.collection_id,
                            duration_milliseconds = query_duration.num_milliseconds(),
                            "Querying documents in specified collection.",
                        );
                    });

                    Ok(query_stream)
                }
                Err(err) => match err {
                    FirestoreError::DatabaseError(ref db_err)
                        if db_err.retry_possible && retries < self.inner.options.max_retries =>
                    {
                        let sleep_duration = tokio::time::Duration::from_millis(
                            rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
                        );
                        warn!(
                            err = %db_err,
                            current_retry = retries + 1,
                            max_retries = self.inner.options.max_retries,
                            delay = sleep_duration.as_millis(),
                            "Failed to run aggregation query. Retrying up to the specified number of times.",
                        );

                        tokio::time::sleep(sleep_duration).await;
                        self.aggregated_query_doc_with_retries(params, retries + 1, span)
                            .await
                    }
                    _ => Err(err),
                },
            }
        }
        .boxed()
    }

    /// Converts an aggregation response into a synthetic `Document` whose
    /// fields are the aggregate values keyed by alias; the document has no
    /// name or timestamps because it does not exist in the database.
    fn aggregated_response_to_doc(mut agg_res: RunAggregationQueryResponse) -> Option<Document> {
        agg_res.result.take().map(|agg_res_doc| Document {
            name: "".to_string(),
            fields: agg_res_doc.aggregate_fields,
            create_time: None,
            update_time: None,
        })
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/parent_path_builder.rs | src/db/parent_path_builder.rs | use crate::db::safe_document_path;
use crate::{FirestoreReference, FirestoreResult};
use std::fmt::{Display, Formatter};
/// A builder for constructing Firestore document paths, typically for parent documents
/// when dealing with sub-collections.
///
/// `ParentPathBuilder` allows for fluently creating nested document paths.
/// It starts with an initial document path and allows appending further
/// collection and document ID segments.
///
/// This is often used to specify the parent document when performing operations
/// on a sub-collection.
///
/// # Examples
///
/// ```rust
/// use firestore::{FirestoreDb, FirestoreResult, ParentPathBuilder};
///
/// # async fn run() -> FirestoreResult<()> {
/// let db = FirestoreDb::new("my-project").await?;
///
/// // Path to "my-collection/my-doc"
/// let parent_path = db.parent_path("my-collection", "my-doc")?;
/// assert_eq!(parent_path.to_string(), "projects/my-project/databases/(default)/documents/my-collection/my-doc");
///
/// // Path to "my-collection/my-doc/sub-collection/sub-doc"
/// let sub_collection_path = parent_path.at("sub-collection", "sub-doc")?;
/// assert_eq!(sub_collection_path.to_string(), "projects/my-project/databases/(default)/documents/my-collection/my-doc/sub-collection/sub-doc");
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct ParentPathBuilder {
value: String,
}
impl ParentPathBuilder {
/// Creates a new `ParentPathBuilder` with an initial path.
/// This is typically called internally by [`FirestoreDb::parent_path()`](crate::FirestoreDb::parent_path).
#[inline]
pub(crate) fn new(initial: String) -> Self {
Self { value: initial }
}
/// Appends a collection name and document ID to the current path.
///
/// This method extends the existing path with `/collection_name/document_id`.
///
/// # Arguments
/// * `collection_name`: The name of the collection to append.
/// * `document_id`: The ID of the document within that collection.
///
/// # Errors
/// Returns [`FirestoreError::InvalidParametersError`](crate::errors::FirestoreError::InvalidParametersError)
/// if the `document_id` is invalid (e.g., contains `/`).
#[inline]
pub fn at<S>(self, collection_name: &str, document_id: S) -> FirestoreResult<Self>
where
S: AsRef<str>,
{
Ok(Self::new(safe_document_path(
self.value.as_str(),
collection_name,
document_id.as_ref(),
)?))
}
}
impl Display for ParentPathBuilder {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.value.fmt(f)
}
}
impl AsRef<str> for ParentPathBuilder {
fn as_ref(&self) -> &str {
self.value.as_str()
}
}
impl From<ParentPathBuilder> for String {
fn from(pb: ParentPathBuilder) -> Self {
pb.value
}
}
impl<'a> From<&'a ParentPathBuilder> for &'a str {
fn from(pb: &'a ParentPathBuilder) -> &'a str {
pb.value.as_str()
}
}
impl From<ParentPathBuilder> for FirestoreReference {
fn from(pb: ParentPathBuilder) -> Self {
FirestoreReference(pb.value)
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/listen_changes_state_storage.rs | src/db/listen_changes_state_storage.rs | use crate::errors::AnyBoxedErrResult;
use crate::{FirestoreListenerTarget, FirestoreListenerTargetResumeType, FirestoreListenerToken};
use async_trait::async_trait;
use rvstruct::ValueStruct;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::*;
#[async_trait]
pub trait FirestoreResumeStateStorage {
async fn read_resume_state(
&self,
target: &FirestoreListenerTarget,
) -> AnyBoxedErrResult<Option<FirestoreListenerTargetResumeType>>;
async fn update_resume_token(
&self,
target: &FirestoreListenerTarget,
token: FirestoreListenerToken,
) -> AnyBoxedErrResult<()>;
}
#[derive(Clone, Debug)]
pub struct FirestoreTempFilesListenStateStorage {
temp_dir: Option<std::path::PathBuf>,
}
impl FirestoreTempFilesListenStateStorage {
pub fn new() -> Self {
Self { temp_dir: None }
}
pub fn with_temp_dir<P: AsRef<std::path::Path>>(temp_dir: P) -> Self {
debug!(
directory = ?temp_dir.as_ref(),
"Using temp dir for listen state storage.",
);
Self {
temp_dir: Some(temp_dir.as_ref().to_path_buf()),
}
}
fn get_file_path(&self, target: &FirestoreListenerTarget) -> std::path::PathBuf {
let target_state_file_name = format!("{}.{}.tmp", TOKEN_FILENAME_PREFIX, target.value());
match &self.temp_dir {
Some(temp_dir) => temp_dir.join(target_state_file_name),
None => std::path::PathBuf::from(target_state_file_name),
}
}
}
const TOKEN_FILENAME_PREFIX: &str = "firestore-listen-token";
#[async_trait]
impl FirestoreResumeStateStorage for FirestoreTempFilesListenStateStorage {
async fn read_resume_state(
&self,
target: &FirestoreListenerTarget,
) -> Result<Option<FirestoreListenerTargetResumeType>, Box<dyn std::error::Error + Send + Sync>>
{
let target_state_file_name = self.get_file_path(target);
let token = std::fs::read_to_string(target_state_file_name)
.ok()
.map(|str| {
hex::decode(str)
.map(FirestoreListenerToken::new)
.map(FirestoreListenerTargetResumeType::Token)
.map_err(Box::new)
})
.transpose()?;
Ok(token)
}
async fn update_resume_token(
&self,
target: &FirestoreListenerTarget,
token: FirestoreListenerToken,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let target_state_file_name = self.get_file_path(target);
Ok(std::fs::write(
target_state_file_name,
hex::encode(token.value()),
)?)
}
}
#[derive(Clone, Debug)]
pub struct FirestoreMemListenStateStorage {
tokens: Arc<RwLock<HashMap<FirestoreListenerTarget, FirestoreListenerToken>>>,
}
impl FirestoreMemListenStateStorage {
pub fn new() -> Self {
Self {
tokens: Arc::new(RwLock::new(HashMap::new())),
}
}
pub async fn get_token(
&self,
target: &FirestoreListenerTarget,
) -> Option<FirestoreListenerToken> {
self.tokens.read().await.get(target).cloned()
}
}
#[async_trait]
impl FirestoreResumeStateStorage for FirestoreMemListenStateStorage {
async fn read_resume_state(
&self,
target: &FirestoreListenerTarget,
) -> Result<Option<FirestoreListenerTargetResumeType>, Box<dyn std::error::Error + Send + Sync>>
{
Ok(self
.get_token(target)
.await
.map(FirestoreListenerTargetResumeType::Token))
}
async fn update_resume_token(
&self,
target: &FirestoreListenerTarget,
token: FirestoreListenerToken,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
self.tokens.write().await.insert(target.clone(), token);
Ok(())
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/batch_writer.rs | src/db/batch_writer.rs | use crate::db::transaction_ops::{TransformObjectOperation, UpdateObjectOperation};
use crate::db::DeleteOperation;
use crate::errors::FirestoreError;
use crate::{
FirestoreDb, FirestoreFieldTransform, FirestoreResult, FirestoreWritePrecondition,
FirestoreWriteResult,
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use gcloud_sdk::google::firestore::v1::Write;
use gcloud_sdk::google::rpc::Status;
use rsb_derive::*;
use serde::Serialize;
#[async_trait]
pub trait FirestoreBatchWriter {
type WriteResult;
async fn write(&self, writes: Vec<Write>) -> FirestoreResult<Self::WriteResult>;
}
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreBatchWriteResponse {
pub position: u64,
pub write_results: Vec<FirestoreWriteResult>,
pub statuses: Vec<Status>,
pub commit_time: Option<DateTime<Utc>>,
}
pub struct FirestoreBatch<'a, W>
where
W: FirestoreBatchWriter,
{
pub db: &'a FirestoreDb,
pub writer: &'a W,
pub writes: Vec<Write>,
}
impl<'a, W> FirestoreBatch<'a, W>
where
W: FirestoreBatchWriter,
{
pub(crate) fn new(db: &'a FirestoreDb, writer: &'a W) -> Self {
Self {
db,
writer,
writes: Vec::new(),
}
}
#[inline]
pub fn add<I>(&mut self, write: I) -> FirestoreResult<&mut Self>
where
I: TryInto<gcloud_sdk::google::firestore::v1::Write, Error = FirestoreError>,
{
self.writes.push(write.try_into()?);
Ok(self)
}
#[inline]
pub async fn write(self) -> FirestoreResult<W::WriteResult> {
self.writer.write(self.writes).await
}
pub fn update_object<T, S>(
&mut self,
collection_id: &str,
document_id: S,
obj: &T,
update_only: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
update_transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
self.update_object_at(
self.db.get_documents_path(),
collection_id,
document_id,
obj,
update_only,
precondition,
update_transforms,
)
}
pub fn update_object_at<T, S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
obj: &T,
update_only: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
update_transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
self.add(UpdateObjectOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
obj,
update_only,
precondition,
update_transforms,
})
}
pub fn delete_by_id<S>(
&mut self,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.delete_by_id_at(
self.db.get_documents_path(),
collection_id,
document_id,
precondition,
)
}
pub fn delete_by_id_at<S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.add(DeleteOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
precondition,
})
}
pub fn transform<S>(
&mut self,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.transform_at(
self.db.get_documents_path(),
collection_id,
document_id,
precondition,
transforms,
)
}
pub fn transform_at<S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.add(TransformObjectOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
precondition,
transforms,
})
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/batch_streaming_writer.rs | src/db/batch_streaming_writer.rs | use crate::{
FirestoreBatch, FirestoreBatchWriteResponse, FirestoreBatchWriter, FirestoreDb,
FirestoreResult, FirestoreWriteResult,
};
use async_trait::async_trait;
use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use gcloud_sdk::google::firestore::v1::{Write, WriteRequest};
use rsb_derive::*;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio::sync::{mpsc, RwLock};
use tokio::task::JoinHandle;
use crate::timestamp_utils::from_timestamp;
use tracing::*;
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreStreamingBatchWriteOptions {
#[default = "Duration::from_millis(500)"]
pub throttle_batch_duration: Duration,
}
pub struct FirestoreStreamingBatchWriter {
pub db: FirestoreDb,
pub options: FirestoreStreamingBatchWriteOptions,
pub batch_span: Span,
finished: Arc<AtomicBool>,
writer: UnboundedSender<WriteRequest>,
thread: Option<JoinHandle<()>>,
last_token: Arc<RwLock<Vec<u8>>>,
sent_counter: Arc<AtomicU64>,
received_counter: Arc<AtomicU64>,
init_wait_reader: UnboundedReceiver<()>,
}
impl Drop for FirestoreStreamingBatchWriter {
fn drop(&mut self) {
if !self.finished.load(Ordering::Relaxed) {
self.batch_span
.in_scope(|| warn!("Batch was not finished."));
}
}
}
impl FirestoreStreamingBatchWriter {
pub async fn new<'b>(
db: FirestoreDb,
options: FirestoreStreamingBatchWriteOptions,
) -> FirestoreResult<(
FirestoreStreamingBatchWriter,
BoxStream<'b, FirestoreResult<FirestoreBatchWriteResponse>>,
)> {
let batch_span = span!(Level::DEBUG, "Firestore Batch Write");
let (requests_writer, requests_receiver) = mpsc::unbounded_channel::<WriteRequest>();
let (responses_writer, responses_receiver) =
mpsc::unbounded_channel::<FirestoreResult<FirestoreBatchWriteResponse>>();
let (init_wait_sender, mut init_wait_reader) = mpsc::unbounded_channel::<()>();
let finished = Arc::new(AtomicBool::new(false));
let thread_finished = finished.clone();
let sent_counter = Arc::new(AtomicU64::new(0));
let thread_sent_counter = sent_counter.clone();
let received_counter = Arc::new(AtomicU64::new(0));
let thread_received_counter = received_counter.clone();
let last_token: Arc<RwLock<Vec<u8>>> = Arc::new(RwLock::new(vec![]));
let thread_last_token = last_token.clone();
let mut thread_db_client = db.client().get();
let thread_options = options.clone();
let thread = tokio::spawn(async move {
let stream = {
use tokio_stream::StreamExt;
tokio_stream::wrappers::UnboundedReceiverStream::new(requests_receiver)
.throttle(thread_options.throttle_batch_duration)
};
match thread_db_client.write(stream).await {
Ok(response) => {
let mut response_stream = response.into_inner().boxed();
loop {
let response_result = response_stream.try_next().await;
let received_counter = thread_received_counter.load(Ordering::Relaxed);
match response_result {
Ok(Some(response)) => {
{
let mut locked = thread_last_token.write().await;
*locked = response.stream_token;
}
if received_counter == 0 {
init_wait_sender.send(()).ok();
} else {
let write_results: FirestoreResult<Vec<FirestoreWriteResult>> =
response
.write_results
.into_iter()
.map(|s| s.try_into())
.collect();
match write_results {
Ok(write_results) => {
responses_writer
.send(Ok(FirestoreBatchWriteResponse::new(
received_counter - 1,
write_results,
vec![],
)
.opt_commit_time(
response
.commit_time
.and_then(|ts| from_timestamp(ts).ok()),
)))
.ok();
}
Err(err) => {
error!(
%err,
received_counter,
"Batch write operation failed.",
);
responses_writer.send(Err(err)).ok();
break;
}
}
}
}
Ok(None) => {
responses_writer
.send(Ok(FirestoreBatchWriteResponse::new(
received_counter - 1,
vec![],
vec![],
)))
.ok();
break;
}
Err(err) if err.code() == gcloud_sdk::tonic::Code::Cancelled => {
debug!(received_counter, "Batch write operation finished.");
responses_writer
.send(Ok(FirestoreBatchWriteResponse::new(
received_counter - 1,
vec![],
vec![],
)))
.ok();
break;
}
Err(err) => {
error!(
%err,
received_counter,
"Batch write operation failed.",
);
responses_writer.send(Err(err.into())).ok();
break;
}
}
{
let _locked = thread_last_token.read().await;
if thread_finished.load(Ordering::Relaxed)
&& thread_sent_counter.load(Ordering::Relaxed) == received_counter
{
init_wait_sender.send(()).ok();
break;
}
}
thread_received_counter.fetch_add(1, Ordering::Relaxed);
}
{
let _locked = thread_last_token.write().await;
thread_finished.store(true, Ordering::Relaxed);
init_wait_sender.send(()).ok();
}
}
Err(err) => {
error!(
%err,
"Batch write operation failed.",
);
responses_writer.send(Err(err.into())).ok();
}
}
});
requests_writer.send(WriteRequest {
database: db.get_database_path().to_string(),
stream_id: "".to_string(),
writes: vec![],
stream_token: vec![],
labels: HashMap::new(),
})?;
init_wait_reader.recv().await;
let responses_stream =
tokio_stream::wrappers::UnboundedReceiverStream::new(responses_receiver).boxed();
Ok((
Self {
db,
options,
batch_span,
finished,
writer: requests_writer,
thread: Some(thread),
last_token,
sent_counter,
received_counter,
init_wait_reader,
},
responses_stream,
))
}
pub async fn finish(mut self) {
let locked = self.last_token.write().await;
if !self.finished.load(Ordering::Relaxed) {
self.finished.store(true, Ordering::Relaxed);
if self.sent_counter.load(Ordering::Relaxed)
> self.received_counter.load(Ordering::Relaxed) - 1
{
drop(locked);
debug!("Still waiting to receive responses for batch writes.");
self.init_wait_reader.recv().await;
} else {
drop(locked);
}
self.writer
.send(WriteRequest {
database: self.db.get_database_path().to_string(),
stream_id: "".to_string(),
writes: vec![],
stream_token: {
let locked = self.last_token.read().await;
locked.clone()
},
labels: HashMap::new(),
})
.ok();
} else {
drop(locked);
}
if let Some(thread) = self.thread.take() {
let _ = tokio::join!(thread);
}
}
async fn write_iterator<I>(&self, writes: I) -> FirestoreResult<()>
where
I: IntoIterator,
I::Item: Into<Write>,
{
self.sent_counter.fetch_add(1, Ordering::Relaxed);
Ok(self.writer.send(WriteRequest {
database: self.db.get_database_path().to_string(),
stream_id: "".to_string(),
writes: writes.into_iter().map(|write| write.into()).collect(),
stream_token: {
let locked = self.last_token.read().await;
locked.clone()
},
labels: HashMap::new(),
})?)
}
pub fn new_batch(&self) -> FirestoreBatch<'_, FirestoreStreamingBatchWriter> {
FirestoreBatch::new(&self.db, self)
}
}
#[async_trait]
impl FirestoreBatchWriter for FirestoreStreamingBatchWriter {
type WriteResult = ();
async fn write(&self, writes: Vec<Write>) -> FirestoreResult<()> {
self.write_iterator(writes).await
}
}
impl FirestoreDb {
pub async fn create_streaming_batch_writer<'b>(
&self,
) -> FirestoreResult<(
FirestoreStreamingBatchWriter,
BoxStream<'b, FirestoreResult<FirestoreBatchWriteResponse>>,
)> {
self.create_streaming_batch_writer_with_options(FirestoreStreamingBatchWriteOptions::new())
.await
}
pub async fn create_streaming_batch_writer_with_options<'b>(
&self,
options: FirestoreStreamingBatchWriteOptions,
) -> FirestoreResult<(
FirestoreStreamingBatchWriter,
BoxStream<'b, FirestoreResult<FirestoreBatchWriteResponse>>,
)> {
FirestoreStreamingBatchWriter::new(self.clone(), options).await
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/transaction_ops.rs | src/db/transaction_ops.rs | use crate::db::safe_document_path;
use crate::{
FirestoreDb, FirestoreError, FirestoreFieldTransform, FirestoreResult,
FirestoreWritePrecondition,
};
use gcloud_sdk::google::firestore::v1::Write;
use serde::Serialize;
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct UpdateObjectOperation<'a, T, S>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
pub parent: String,
pub collection_id: String,
pub document_id: S,
pub obj: &'a T,
pub update_only: Option<Vec<String>>,
pub precondition: Option<FirestoreWritePrecondition>,
pub update_transforms: Vec<FirestoreFieldTransform>,
}
impl<'a, T, S> TryInto<Write> for UpdateObjectOperation<'a, T, S>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
type Error = FirestoreError;
fn try_into(self) -> Result<Write, Self::Error> {
Ok(Write {
update_mask: self.update_only.map({
|vf| gcloud_sdk::google::firestore::v1::DocumentMask {
field_paths: vf.iter().map(|f| f.to_string()).collect(),
}
}),
update_transforms: self
.update_transforms
.into_iter()
.map(|s| s.try_into())
.collect::<FirestoreResult<
Vec<gcloud_sdk::google::firestore::v1::document_transform::FieldTransform>,
>>()?,
current_document: self.precondition.map(|cond| cond.try_into()).transpose()?,
operation: Some(gcloud_sdk::google::firestore::v1::write::Operation::Update(
FirestoreDb::serialize_to_doc(
safe_document_path(
&self.parent,
self.collection_id.as_str(),
self.document_id.as_ref(),
)?,
&self.obj,
)?,
)),
})
}
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub(crate) struct DeleteOperation<S>
where
S: AsRef<str>,
{
pub parent: String,
pub collection_id: String,
pub document_id: S,
pub precondition: Option<FirestoreWritePrecondition>,
}
impl<S> TryInto<Write> for DeleteOperation<S>
where
S: AsRef<str>,
{
type Error = FirestoreError;
fn try_into(self) -> Result<Write, Self::Error> {
Ok(Write {
update_mask: None,
update_transforms: vec![],
current_document: self.precondition.map(|cond| cond.try_into()).transpose()?,
operation: Some(gcloud_sdk::google::firestore::v1::write::Operation::Delete(
safe_document_path(
&self.parent,
self.collection_id.as_str(),
self.document_id.as_ref(),
)?,
)),
})
}
}
#[derive(Debug, PartialEq, Clone)]
pub(crate) struct TransformObjectOperation<S>
where
S: AsRef<str>,
{
pub parent: String,
pub collection_id: String,
pub document_id: S,
pub precondition: Option<FirestoreWritePrecondition>,
pub transforms: Vec<FirestoreFieldTransform>,
}
impl<S> TryInto<Write> for TransformObjectOperation<S>
where
S: AsRef<str>,
{
type Error = FirestoreError;
fn try_into(self) -> Result<Write, Self::Error> {
Ok(Write {
update_mask: None,
update_transforms: vec![],
current_document: self.precondition.map(|cond| cond.try_into()).transpose()?,
operation: Some(gcloud_sdk::google::firestore::v1::write::Operation::Transform(
gcloud_sdk::google::firestore::v1::DocumentTransform {
document: safe_document_path(
&self.parent,
self.collection_id.as_str(),
self.document_id.as_ref(),
)?,
field_transforms: self.transforms
.into_iter()
.map(|s| s.try_into())
.collect::<FirestoreResult<Vec<gcloud_sdk::google::firestore::v1::document_transform::FieldTransform>>>()?
}
)),
})
}
}
pub trait FirestoreTransactionOps {
fn add<I>(&mut self, write: I) -> FirestoreResult<&mut Self>
where
I: TryInto<gcloud_sdk::google::firestore::v1::Write, Error = FirestoreError>;
fn get_documents_path(&self) -> &String;
fn update_object<T, S>(
&mut self,
collection_id: &str,
document_id: S,
obj: &T,
update_only: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
update_transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
self.update_object_at(
&self.get_documents_path().clone(),
collection_id,
document_id,
obj,
update_only,
precondition,
update_transforms,
)
}
fn update_object_at<T, S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
obj: &T,
update_only: Option<Vec<String>>,
precondition: Option<FirestoreWritePrecondition>,
update_transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
T: Serialize + Sync + Send,
S: AsRef<str>,
{
self.add(UpdateObjectOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
obj,
update_only,
precondition,
update_transforms,
})
}
fn delete_by_id<S>(
&mut self,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.delete_by_id_at(
&self.get_documents_path().clone(),
collection_id,
document_id,
precondition,
)
}
fn delete_by_id_at<S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.add(DeleteOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
precondition,
})
}
fn transform<S>(
&mut self,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.transform_at(
&self.get_documents_path().clone(),
collection_id,
document_id,
precondition,
transforms,
)
}
fn transform_at<S>(
&mut self,
parent: &str,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
) -> FirestoreResult<&mut Self>
where
S: AsRef<str>,
{
self.add(TransformObjectOperation {
parent: parent.to_string(),
collection_id: collection_id.to_string(),
document_id,
precondition,
transforms,
})
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/create.rs | src/db/create.rs | use crate::{FirestoreDb, FirestoreResult};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use gcloud_sdk::google::firestore::v1::*;
use serde::{Deserialize, Serialize};
use tracing::*;
#[async_trait]
pub trait FirestoreCreateSupport {
async fn create_doc<S>(
&self,
collection_id: &str,
document_id: Option<S>,
input_doc: Document,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send;
async fn create_doc_at<S>(
&self,
parent: &str,
collection_id: &str,
document_id: Option<S>,
input_doc: Document,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send;
async fn create_obj<I, O, S>(
&self,
collection_id: &str,
document_id: Option<S>,
obj: &I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send;
async fn create_obj_at<I, O, S>(
&self,
parent: &str,
collection_id: &str,
document_id: Option<S>,
obj: &I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send;
}
#[async_trait]
impl FirestoreCreateSupport for FirestoreDb {
async fn create_doc<S>(
&self,
collection_id: &str,
document_id: Option<S>,
input_doc: Document,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send,
{
self.create_doc_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
input_doc,
return_only_fields,
)
.await
}
async fn create_doc_at<S>(
&self,
parent: &str,
collection_id: &str,
document_id: Option<S>,
input_doc: Document,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send,
{
let span = span!(
Level::DEBUG,
"Firestore Create Document",
"/firestore/collection_name" = collection_id,
"/firestore/response_time" = field::Empty,
"/firestore/document_name" = field::Empty,
);
let create_document_request = gcloud_sdk::tonic::Request::new(CreateDocumentRequest {
parent: parent.into(),
document_id: document_id
.as_ref()
.map(|id| id.as_ref().to_string())
.unwrap_or_default(),
mask: return_only_fields.as_ref().map(|masks| DocumentMask {
field_paths: masks.clone(),
}),
collection_id: collection_id.into(),
document: Some(input_doc),
});
let begin_query_utc: DateTime<Utc> = Utc::now();
let create_response = self
.client()
.get()
.create_document(create_document_request)
.await?;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
span.record(
"/firestore/response_time",
query_duration.num_milliseconds(),
);
let response_inner = create_response.into_inner();
span.record("/firestore/document_name", &response_inner.name);
span.in_scope(|| {
debug!(
collection_id,
document_id = document_id.as_ref().map(|id| id.as_ref()),
"Created a new document.",
);
});
Ok(response_inner)
}
async fn create_obj<I, O, S>(
&self,
collection_id: &str,
document_id: Option<S>,
obj: &I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send,
{
self.create_obj_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
obj,
return_only_fields,
)
.await
}
async fn create_obj_at<I, O, S>(
&self,
parent: &str,
collection_id: &str,
document_id: Option<S>,
obj: &I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<O>
where
I: Serialize + Sync + Send,
for<'de> O: Deserialize<'de>,
S: AsRef<str> + Send,
{
let input_doc = Self::serialize_to_doc("", obj)?;
let doc = self
.create_doc_at(
parent,
collection_id,
document_id,
input_doc,
return_only_fields,
)
.await?;
Self::deserialize_doc_to(&doc)
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/get.rs | src/db/get.rs | use crate::db::safe_document_path;
use crate::errors::*;
use crate::*;
use async_trait::async_trait;
use chrono::prelude::*;
use futures::future::{BoxFuture, FutureExt};
use futures::stream::BoxStream;
use futures::TryFutureExt;
use futures::TryStreamExt;
use futures::{future, StreamExt};
use gcloud_sdk::google::firestore::v1::*;
use rand::Rng;
use serde::Deserialize;
use tracing::*;
#[async_trait]
pub trait FirestoreGetByIdSupport {
async fn get_doc<S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send;
async fn get_doc_at<S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send;
async fn get_obj<T, S>(&self, collection_id: &str, document_id: S) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn get_obj_return_fields<T, S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn get_obj_at<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn get_obj_at_return_fields<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn get_obj_if_exists<T, S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Option<T>>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn get_obj_at_if_exists<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Option<T>>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send;
async fn batch_stream_get_docs<S, I>(
&self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_docs_with_errors<S, I>(
&self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_docs_at<S, I>(
&self,
parent: &str,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_docs_at_with_errors<S, I>(
&self,
parent: &str,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_objects<'a, T, S, I>(
&'a self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
where
for<'de> T: Deserialize<'de> + Send + 'a,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_objects_with_errors<'a, T, S, I>(
&'a self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
where
for<'de> T: Deserialize<'de> + Send + 'a,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_objects_at<'a, T, S, I>(
&'a self,
parent: &str,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
where
for<'de> T: Deserialize<'de> + Send + 'a,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
async fn batch_stream_get_objects_at_with_errors<'a, T, S, I>(
&'a self,
parent: &str,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
where
for<'de> T: Deserialize<'de> + Send + 'a,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send;
}
#[async_trait]
impl FirestoreGetByIdSupport for FirestoreDb {
async fn get_doc_at<S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send,
{
let document_path = safe_document_path(parent, collection_id, document_id.as_ref())?;
self.get_doc_by_path(
collection_id.to_string(),
document_path,
return_only_fields,
0,
)
.await
}
async fn get_doc<S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Document>
where
S: AsRef<str> + Send,
{
self.get_doc_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
return_only_fields,
)
.await
}
async fn get_obj<T, S>(&self, collection_id: &str, document_id: S) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
self.get_obj_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
)
.await
}
async fn get_obj_return_fields<T, S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
self.get_obj_at_return_fields(
self.get_documents_path().as_str(),
collection_id,
document_id,
return_only_fields,
)
.await
}
async fn get_obj_at<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
let doc: Document = self
.get_doc_at(parent, collection_id, document_id, None)
.await?;
let obj: T = Self::deserialize_doc_to(&doc)?;
Ok(obj)
}
async fn get_obj_at_return_fields<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<T>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
let doc: Document = self
.get_doc_at(parent, collection_id, document_id, return_only_fields)
.await?;
let obj: T = Self::deserialize_doc_to(&doc)?;
Ok(obj)
}
async fn get_obj_if_exists<T, S>(
&self,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Option<T>>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
self.get_obj_at_if_exists(
self.get_documents_path().as_str(),
collection_id,
document_id,
return_only_fields,
)
.await
}
async fn get_obj_at_if_exists<T, S>(
&self,
parent: &str,
collection_id: &str,
document_id: S,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<Option<T>>
where
for<'de> T: Deserialize<'de>,
S: AsRef<str> + Send,
{
match self
.get_obj_at_return_fields::<T, S>(
parent,
collection_id,
document_id,
return_only_fields,
)
.await
{
Ok(obj) => Ok(Some(obj)),
Err(err) => match err {
FirestoreError::DataNotFoundError(_) => Ok(None),
_ => Err(err),
},
}
}
async fn batch_stream_get_docs_at_with_errors<S, I>(
&self,
parent: &str,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send,
{
let full_doc_ids: Vec<String> = document_ids
.into_iter()
.map(|document_id| safe_document_path(parent, collection_id, document_id.as_ref()))
.collect::<FirestoreResult<Vec<String>>>()?;
self.get_docs_by_ids(collection_id.to_string(), full_doc_ids, return_only_fields)
.await
}
    /// Streams `(doc_id, Option<Document>)` pairs for the given IDs under an
    /// explicit `parent` path.
    ///
    /// Per-item errors from the underlying batch-get stream are logged and
    /// dropped from the output; use
    /// [`FirestoreGetByIdSupport::batch_stream_get_docs_at_with_errors`]
    /// to observe them.
    async fn batch_stream_get_docs_at<S, I>(
        &self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        let doc_stream = self
            .batch_stream_get_docs_at_with_errors(
                parent,
                collection_id,
                document_ids,
                return_only_fields,
            )
            .await?;
        Ok(Box::pin(doc_stream.filter_map(|doc_res| {
            future::ready(match doc_res {
                Ok(doc_pair) => Some(doc_pair),
                Err(err) => {
                    // Swallow after logging: this variant exposes an
                    // infallible stream of found/missing documents only.
                    error!(
                        %err,
                        "Error occurred while consuming batch get as a stream.",
                    );
                    None
                }
            })
        })))
    }
async fn batch_stream_get_docs<S, I>(
&self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send,
{
self.batch_stream_get_docs_at(
self.get_documents_path(),
collection_id,
document_ids,
return_only_fields,
)
.await
}
async fn batch_stream_get_docs_with_errors<S, I>(
&self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
where
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send,
{
self.batch_stream_get_docs_at_with_errors(
self.get_documents_path(),
collection_id,
document_ids,
return_only_fields,
)
.await
}
async fn batch_stream_get_objects<'b, T, S, I>(
&'b self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'b, (String, Option<T>)>>
where
for<'de> T: Deserialize<'de> + Send + 'b,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send,
{
self.batch_stream_get_objects_at(
self.get_documents_path(),
collection_id,
document_ids,
return_only_fields,
)
.await
}
    /// Streams `(doc_id, Option<T>)` pairs under an explicit `parent` path.
    ///
    /// Missing documents are emitted as `(doc_id, None)`. Documents that fail
    /// to deserialize into `T` are logged and dropped from the stream entirely
    /// (they are NOT emitted as `None`).
    async fn batch_stream_get_objects_at<'a, T, S, I>(
        &'a self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
    where
        for<'de> T: Deserialize<'de> + Send + 'a,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        let doc_stream = self
            .batch_stream_get_docs_at(parent, collection_id, document_ids, return_only_fields)
            .await?;
        Ok(Box::pin(doc_stream.filter_map(
            |(doc_id, maybe_doc)| async move {
                match maybe_doc {
                    Some(doc) => match Self::deserialize_doc_to(&doc) {
                        Ok(obj) => Some((doc_id, Some(obj))),
                        Err(err) => {
                            // Deserialization failure: log and skip the item.
                            error!(
                                %err,
                                "Error occurred while consuming batch documents as a stream. Document: {}",
                                doc_id
                            );
                            None
                        }
                    },
                    // Document not found on the server.
                    None => Some((doc_id, None)),
                }
            },
        )))
    }
    /// Streams `FirestoreResult<(doc_id, Option<T>)>` items under an explicit
    /// `parent` path.
    ///
    /// Unlike [`FirestoreGetByIdSupport::batch_stream_get_objects_at`], both
    /// transport errors and deserialization failures are surfaced as `Err`
    /// items in the stream instead of being logged and dropped.
    async fn batch_stream_get_objects_at_with_errors<'a, T, S, I>(
        &'a self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
    where
        for<'de> T: Deserialize<'de> + Send + 'a,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        let doc_stream = self
            .batch_stream_get_docs_at_with_errors(
                parent,
                collection_id,
                document_ids,
                return_only_fields,
            )
            .await?;
        // `and_then` maps only `Ok` items; upstream `Err` items pass through
        // unchanged. A failed deserialization turns the item into `Err`.
        Ok(Box::pin(doc_stream.and_then(|(doc_id, maybe_doc)| {
            future::ready({
                maybe_doc
                    .map(|doc| Self::deserialize_doc_to::<T>(&doc))
                    .transpose()
                    .map(|obj| (doc_id, obj))
            })
        })))
    }
async fn batch_stream_get_objects_with_errors<'a, T, S, I>(
&'a self,
collection_id: &str,
document_ids: I,
return_only_fields: Option<Vec<String>>,
) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
where
for<'de> T: Deserialize<'de> + Send + 'a,
S: AsRef<str> + Send,
I: IntoIterator<Item = S> + Send,
{
let doc_stream = self
.batch_stream_get_docs_at_with_errors(
self.get_documents_path(),
collection_id,
document_ids,
return_only_fields,
)
.await?;
Ok(Box::pin(doc_stream.and_then(|(doc_id, maybe_doc)| {
future::ready({
maybe_doc
.map(|doc| Self::deserialize_doc_to::<T>(&doc))
.transpose()
.map(|obj| (doc_id, obj))
})
})))
}
}
impl FirestoreDb {
pub(crate) fn get_doc_by_path(
&self,
collection_id: String,
document_path: String,
return_only_fields: Option<Vec<String>>,
retries: usize,
) -> BoxFuture<'_, FirestoreResult<Document>> {
async move {
#[cfg(feature = "caching")]
{
if let FirestoreCachedValue::UseCached(doc) = self
.get_doc_from_cache(
collection_id.as_str(),
document_path.as_str(),
&return_only_fields,
)
.await?
{
return Ok(doc);
}
}
let _return_only_fields_empty = return_only_fields.is_none();
let span = span!(
Level::DEBUG,
"Firestore Get Doc",
"/firestore/collection_name" = collection_id,
"/firestore/response_time" = field::Empty,
"/firestore/document_name" = document_path.as_str()
);
let begin_query_utc: DateTime<Utc> = Utc::now();
let request = gcloud_sdk::tonic::Request::new(GetDocumentRequest {
name: document_path.clone(),
consistency_selector: self
.session_params
.consistency_selector
.as_ref()
.map(|selector| selector.try_into())
.transpose()?,
mask: return_only_fields.map({
|vf| gcloud_sdk::google::firestore::v1::DocumentMask {
field_paths: vf.iter().map(|f| f.to_string()).collect(),
}
}),
});
let response = self
.client()
.get()
.get_document(request)
.map_err(|e| e.into())
.await;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
span.record(
"/firestore/response_time",
query_duration.num_milliseconds(),
);
match response {
Ok(doc_response) => {
span.in_scope(|| {
debug!(
document_path,
duration_milliseconds = query_duration.num_milliseconds(),
"Read document.",
);
});
let doc = doc_response.into_inner();
#[cfg(feature = "caching")]
if _return_only_fields_empty {
self.offer_doc_update_to_cache(&doc).await?;
}
Ok(doc)
}
Err(err) => match err {
FirestoreError::DatabaseError(ref db_err)
if db_err.retry_possible && retries < self.get_options().max_retries =>
{
let sleep_duration = tokio::time::Duration::from_millis(
rand::rng().random_range(0..2u64.pow(retries as u32) * 1000 + 1),
);
span.in_scope(|| {
warn!(
err = %db_err,
current_retry = retries + 1,
max_retries = self.get_options().max_retries,
delay = sleep_duration.as_millis(),
"Failed to get document. Retrying up to the specified number of times.",
);
});
tokio::time::sleep(sleep_duration).await;
self.get_doc_by_path(collection_id, document_path, None, retries + 1)
.await
}
_ => Err(err),
},
}
}
.boxed()
}
    /// Batch-fetches documents by their fully-qualified paths as a stream of
    /// `(doc_id, Option<Document>)` results.
    ///
    /// With the `caching` feature enabled, the cache is consulted first and,
    /// for documents read from the backend, each found document is offered
    /// back to the cache. The yielded `doc_id` is the last path segment of the
    /// document name (falling back to the full name if it has no `/`).
    pub(crate) async fn get_docs_by_ids(
        &self,
        collection_id: String,
        full_doc_ids: Vec<String>,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'_, FirestoreResult<(String, Option<Document>)>>> {
        #[cfg(feature = "caching")]
        {
            if let FirestoreCachedValue::UseCached(stream) = self
                .get_docs_by_ids_from_cache(
                    collection_id.as_str(),
                    &full_doc_ids,
                    &return_only_fields,
                )
                .await?
            {
                return Ok(stream);
            }
        }
        let span = span!(
            Level::DEBUG,
            "Firestore Batch Get",
            "/firestore/collection_name" = collection_id.as_str(),
            "/firestore/ids_count" = full_doc_ids.len()
        );
        let request = gcloud_sdk::tonic::Request::new(BatchGetDocumentsRequest {
            database: self.get_database_path().clone(),
            documents: full_doc_ids,
            consistency_selector: self
                .session_params
                .consistency_selector
                .as_ref()
                .map(|selector| selector.try_into())
                .transpose()?,
            mask: return_only_fields.map({
                |vf| gcloud_sdk::google::firestore::v1::DocumentMask {
                    field_paths: vf.iter().map(|f| f.to_string()).collect(),
                }
            }),
        });
        match self.client().get().batch_get_documents(request).await {
            Ok(response) => {
                span.in_scope(|| debug!("Start consuming a batch of documents by IDs."));
                let stream = response
                    .into_inner()
                    .filter_map(move |r| async move {
                        match r {
                            Ok(doc_response) => match doc_response.result {
                                // The server found the document.
                                Some(batch_get_documents_response::Result::Found(document)) => {
                                    let doc_id = document
                                        .name
                                        .split('/')
                                        .next_back()
                                        .map(|s| s.to_string())
                                        .unwrap_or_else(|| document.name.clone());
                                    #[cfg(feature = "caching")]
                                    {
                                        // Best-effort cache update: errors ignored.
                                        self.offer_doc_update_to_cache(&document).await.ok();
                                        Some(Ok((doc_id, Some(document))))
                                    }
                                    #[cfg(not(feature = "caching"))]
                                    {
                                        Some(Ok((doc_id, Some(document))))
                                    }
                                }
                                // The server reports the document as missing.
                                Some(batch_get_documents_response::Result::Missing(
                                    full_doc_id,
                                )) => {
                                    let doc_id = full_doc_id
                                        .split('/')
                                        .next_back()
                                        .map(|s| s.to_string())
                                        .unwrap_or_else(|| full_doc_id);
                                    Some(Ok((doc_id, None)))
                                }
                                // Responses without a result payload are skipped.
                                None => None,
                            },
                            Err(err) => Some(Err(err.into())),
                        }
                    })
                    .boxed();
                Ok(stream)
            }
            Err(err) => Err(err.into()),
        }
    }
#[cfg(feature = "caching")]
pub(crate) async fn get_doc_from_cache(
&self,
collection_id: &str,
document_path: &str,
_return_only_fields: &Option<Vec<String>>,
) -> FirestoreResult<FirestoreCachedValue<FirestoreDocument>> {
if let FirestoreDbSessionCacheMode::ReadThroughCache(ref cache)
| FirestoreDbSessionCacheMode::ReadCachedOnly(ref cache) = self.session_params.cache_mode
{
let begin_query_utc: DateTime<Utc> = Utc::now();
let cache_response = cache.get_doc_by_path(document_path).await?;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
let span = span!(
Level::DEBUG,
"Firestore Get Cache",
"/firestore/collection_name" = collection_id,
"/firestore/response_time" = query_duration.num_milliseconds(),
"/firestore/document_name" = document_path,
"/firestore/cache_result" = field::Empty,
);
if let Some(doc) = cache_response {
span.record("/firestore/cache_result", "hit");
span.in_scope(|| {
debug!(
document_path,
duration_milliseconds = query_duration.num_milliseconds(),
"Read document from cache.",
);
});
return Ok(FirestoreCachedValue::UseCached(doc));
} else {
span.record("/firestore/cache_result", "miss");
span.in_scope(|| {
debug!(document_path, "Missing document in cache.");
});
if let FirestoreDbSessionCacheMode::ReadCachedOnly(_) =
self.session_params.cache_mode
{
return Err(FirestoreError::DataNotFoundError(
FirestoreDataNotFoundError::new(
FirestoreErrorPublicGenericDetails::new("CACHE_MISS".to_string()),
format!("Document {document_path} not found in cache"),
),
));
}
}
}
Ok(FirestoreCachedValue::SkipCache)
}
#[cfg(feature = "caching")]
#[inline]
pub(crate) async fn get_docs_by_ids_from_cache(
&self,
collection_id: &str,
full_doc_ids: &[String],
_return_only_fields: &Option<Vec<String>>,
) -> FirestoreResult<
FirestoreCachedValue<BoxStream<'_, FirestoreResult<(String, Option<Document>)>>>,
> {
if let FirestoreDbSessionCacheMode::ReadThroughCache(ref cache)
| FirestoreDbSessionCacheMode::ReadCachedOnly(ref cache) = self.session_params.cache_mode
{
let span = span!(
Level::DEBUG,
"Firestore Batch Get Cached",
"/firestore/collection_name" = collection_id,
"/firestore/ids_count" = full_doc_ids.len(),
"/firestore/cache_result" = field::Empty,
"/firestore/response_time" = field::Empty
);
let begin_query_utc: DateTime<Utc> = Utc::now();
let cached_stream: BoxStream<FirestoreResult<(String, Option<FirestoreDocument>)>> =
cache.get_docs_by_paths(full_doc_ids).await?;
let cached_vec: Vec<(String, Option<FirestoreDocument>)> =
cached_stream.try_collect::<Vec<_>>().await?;
let end_query_utc: DateTime<Utc> = Utc::now();
let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
span.record(
"/firestore/response_time",
query_duration.num_milliseconds(),
);
if cached_vec.len() == full_doc_ids.len()
|| matches!(
self.session_params.cache_mode,
FirestoreDbSessionCacheMode::ReadCachedOnly(_)
)
{
span.record("/firestore/cache_result", "hit");
span.in_scope(|| {
debug!(
num_documents = full_doc_ids.len(),
"Reading documents from cache."
);
});
return Ok(FirestoreCachedValue::UseCached(Box::pin(
futures::stream::iter(cached_vec)
.map(|(doc_id, maybe_doc)| Ok((doc_id, maybe_doc))),
)));
} else {
span.record("/firestore/cache_result", "miss");
span.in_scope(|| {
debug!("Not all documents were found in cache. Reading from Firestore.")
});
return Ok(FirestoreCachedValue::SkipCache);
}
}
Ok(FirestoreCachedValue::SkipCache)
}
#[cfg(feature = "caching")]
#[inline]
pub(crate) async fn offer_doc_update_to_cache(
&self,
document: &FirestoreDocument,
) -> FirestoreResult<()> {
if let FirestoreDbSessionCacheMode::ReadThroughCache(ref cache) =
self.session_params.cache_mode
{
cache.update_doc_by_path(document).await?;
}
Ok(())
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/transaction.rs | src/db/transaction.rs | pub use crate::db::transaction_ops::FirestoreTransactionOps;
use crate::errors::*;
use crate::timestamp_utils::from_timestamp;
use crate::{
FirestoreConsistencySelector, FirestoreDb, FirestoreError, FirestoreResult,
FirestoreTransactionId, FirestoreTransactionMode, FirestoreTransactionOptions,
FirestoreTransactionResponse, FirestoreWriteResult,
};
use backoff::future::retry;
use backoff::ExponentialBackoffBuilder;
use futures::future::BoxFuture;
use gcloud_sdk::google::firestore::v1::{BeginTransactionRequest, CommitRequest, RollbackRequest};
use std::time::Duration;
use tracing::*;
/// A detached snapshot of a transaction's state: its server-assigned ID, the
/// documents path it operates on, its tracing span, and the accumulated
/// writes.
///
/// Unlike [`FirestoreTransaction`], this type holds no borrow of
/// `FirestoreDb`, so it can be stored or moved freely and later re-attached
/// with `FirestoreTransaction::from_data`.
#[derive(Debug, Clone)]
pub struct FirestoreTransactionData {
    // Opaque transaction token returned by BeginTransaction.
    transaction_id: FirestoreTransactionId,
    // Root documents path the writes are addressed under.
    document_path: String,
    // Tracing span shared by all operations of this transaction.
    transaction_span: Span,
    // Writes accumulated so far, committed together.
    writes: Vec<gcloud_sdk::google::firestore::v1::Write>,
}
impl FirestoreTransactionData {
    /// Builds transaction data from its parts (typically produced by
    /// `FirestoreTransaction::into_data`).
    pub fn new(
        transaction_id: FirestoreTransactionId,
        document_path: String,
        transaction_span: Span,
        writes: Vec<gcloud_sdk::google::firestore::v1::Write>,
    ) -> Self {
        Self {
            transaction_id,
            document_path,
            transaction_span,
            writes,
        }
    }
    /// The server-assigned transaction ID.
    #[inline]
    pub fn transaction_id(&self) -> &FirestoreTransactionId {
        &self.transaction_id
    }
    /// The root documents path this transaction's writes are addressed under.
    #[inline]
    pub fn documents_path(&self) -> &String {
        &self.document_path
    }
    /// The tracing span associated with this transaction.
    #[inline]
    pub fn transaction_span(&self) -> &Span {
        &self.transaction_span
    }
    /// The writes accumulated so far.
    #[inline]
    pub fn writes(&self) -> &Vec<gcloud_sdk::google::firestore::v1::Write> {
        &self.writes
    }
    /// `true` when no writes have been accumulated yet.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.writes.is_empty()
    }
}
impl From<FirestoreTransaction<'_>> for FirestoreTransactionData {
fn from(transaction: FirestoreTransaction) -> Self {
transaction.into_data()
}
}
impl FirestoreTransactionOps for FirestoreTransactionData {
    /// Converts `write` into a Firestore write and appends it to the pending
    /// writes, returning `&mut Self` for call chaining.
    fn add<I>(&mut self, write: I) -> FirestoreResult<&mut Self>
    where
        I: TryInto<gcloud_sdk::google::firestore::v1::Write, Error = FirestoreError>,
    {
        self.writes.push(write.try_into()?);
        Ok(self)
    }

    /// The root documents path the writes are addressed under.
    fn get_documents_path(&self) -> &String {
        &self.document_path
    }
}
#[derive(Debug)]
pub struct FirestoreTransaction<'a> {
db: &'a FirestoreDb,
data: FirestoreTransactionData,
finished: bool,
}
impl<'a> FirestoreTransaction<'a> {
pub async fn new(
db: &'a FirestoreDb,
options: FirestoreTransactionOptions,
) -> FirestoreResult<FirestoreTransaction<'a>> {
let transaction_span = span!(
Level::DEBUG,
"Firestore Transaction",
"/firestore/transaction_id" = field::Empty,
"/firestore/commit_time" = field::Empty
);
let request = gcloud_sdk::tonic::Request::new(BeginTransactionRequest {
database: db.get_database_path().clone(),
options: Some(options.clone().try_into()?),
});
let response = db
.client()
.get()
.begin_transaction(request)
.await?
.into_inner();
let mut hex_trans_id = hex::encode(&response.transaction);
hex_trans_id.truncate(16);
transaction_span.record("/firestore/transaction_id", hex_trans_id);
transaction_span.in_scope(|| {
debug!(mode = ?options.mode, "Created a new transaction.");
});
let data = FirestoreTransactionData {
transaction_id: response.transaction,
document_path: db.get_documents_path().clone(),
transaction_span,
writes: Vec::new(),
};
Ok(Self {
db,
data,
finished: false,
})
}
#[inline]
pub fn transaction_id(&self) -> &FirestoreTransactionId {
&self.data.transaction_id
}
#[inline]
pub fn db(&self) -> &'a FirestoreDb {
self.db
}
pub async fn commit(mut self) -> FirestoreResult<FirestoreTransactionResponse> {
self.finished = true;
if self.data.writes.is_empty() {
self.data.transaction_span.in_scope(|| {
debug!("Transaction has been committed without any writes.");
});
}
let request = gcloud_sdk::tonic::Request::new(CommitRequest {
database: self.db.get_database_path().clone(),
writes: self.data.writes.drain(..).collect(),
transaction: self.data.transaction_id.clone(),
});
let response = self.db.client().get().commit(request).await?.into_inner();
let result = FirestoreTransactionResponse::new(
response
.write_results
.into_iter()
.map(|s| s.try_into())
.collect::<FirestoreResult<Vec<FirestoreWriteResult>>>()?,
)
.opt_commit_time(response.commit_time.map(from_timestamp).transpose()?);
if let Some(ref commit_time) = result.commit_time {
self.data
.transaction_span
.record("/firestore/commit_time", commit_time.to_rfc3339());
}
self.data.transaction_span.in_scope(|| {
debug!("Transaction has been committed.");
});
Ok(result)
}
pub async fn rollback(mut self) -> FirestoreResult<()> {
self.finished = true;
let request = gcloud_sdk::tonic::Request::new(RollbackRequest {
database: self.db.get_database_path().clone(),
transaction: self.data.transaction_id.clone(),
});
self.db.client().get().rollback(request).await?;
self.data.transaction_span.in_scope(|| {
debug!("Transaction has been rolled back.");
});
Ok(())
}
pub fn finish(&mut self) -> FirestoreResult<()> {
self.finished = true;
self.data.transaction_span.in_scope(|| {
debug!("Transaction has been finished locally without rolling back to be able to retry it again.");
});
Ok(())
}
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
#[inline]
pub fn transaction_data(&self) -> &FirestoreTransactionData {
&self.data
}
pub fn from_data(
db: &'a FirestoreDb,
data: FirestoreTransactionData,
) -> FirestoreTransaction<'a> {
Self {
db,
data,
finished: false,
}
}
#[inline]
pub fn into_data(mut self) -> FirestoreTransactionData {
self.finished = true;
FirestoreTransactionData::new(
self.data.transaction_id.clone(),
self.data.document_path.clone(),
self.data.transaction_span.clone(),
self.data.writes.drain(..).collect(),
)
}
}
impl FirestoreTransactionOps for FirestoreTransaction<'_> {
fn add<I>(&mut self, write: I) -> FirestoreResult<&mut Self>
where
I: TryInto<gcloud_sdk::google::firestore::v1::Write, Error = FirestoreError>,
{
self.data.add(write)?;
Ok(self)
}
fn get_documents_path(&self) -> &String {
self.data.get_documents_path()
}
}
impl Drop for FirestoreTransaction<'_> {
    /// Warns when a transaction is dropped without being committed, rolled
    /// back, or explicitly finished — a likely programming mistake.
    fn drop(&mut self) {
        if self.finished {
            return;
        }
        self.data
            .transaction_span
            .in_scope(|| warn!("Transaction was neither committed nor rolled back."));
    }
}
impl FirestoreDb {
pub async fn begin_transaction(&self) -> FirestoreResult<FirestoreTransaction<'_>> {
Self::begin_transaction_with_options(self, FirestoreTransactionOptions::new()).await
}
pub async fn begin_transaction_with_options(
&self,
options: FirestoreTransactionOptions,
) -> FirestoreResult<FirestoreTransaction<'_>> {
FirestoreTransaction::new(self, options).await
}
pub async fn run_transaction<T, FN, E>(&self, func: FN) -> FirestoreResult<T>
where
for<'b> FN: Fn(
FirestoreDb,
&'b mut FirestoreTransaction,
) -> BoxFuture<'b, std::result::Result<T, BackoffError<E>>>,
E: std::error::Error + Send + Sync + 'static,
{
self.run_transaction_with_options(func, FirestoreTransactionOptions::new())
.await
}
pub async fn run_transaction_with_options<T, FN, E>(
&self,
func: FN,
options: FirestoreTransactionOptions,
) -> FirestoreResult<T>
where
for<'b> FN: Fn(
FirestoreDb,
&'b mut FirestoreTransaction,
) -> BoxFuture<'b, std::result::Result<T, BackoffError<E>>>,
E: std::error::Error + Send + Sync + 'static,
{
// Perform our initial attempt. If this fails and the backend tells us we can retry,
// we'll try again with exponential backoff using the first attempt's transaction ID.
let (transaction_id, transaction_span, initial_backoff_duration) = {
let mut transaction = self.begin_transaction_with_options(options.clone()).await?;
let transaction_id = transaction.transaction_id().clone();
let transaction_span = transaction.data.transaction_span.clone();
let mut initial_backoff_duration: Option<Duration> = None;
let cdb = self.clone_with_consistency_selector(
FirestoreConsistencySelector::Transaction(transaction_id.clone()),
);
match func(cdb, &mut transaction).await {
Ok(ret_val) => {
match transaction.commit().await {
Ok(_) => return Ok(ret_val),
Err(err) => match err {
FirestoreError::DatabaseError(ref db_err) if db_err.retry_possible => {
transaction_span.in_scope(|| {
warn!(
%err,
"Transient error occurred while committing transaction.",
)
});
// Ignore; we'll try again below
}
other => return Err(other),
},
}
}
Err(err) => match err {
BackoffError::Transient { err, retry_after } => {
transaction_span.in_scope(|| {
warn!(%err, delay = ?retry_after, "Transient error occurred in transaction function. Retrying after the specified delay.");
});
initial_backoff_duration = retry_after;
transaction.finish().ok();
}
BackoffError::Permanent(err) => {
transaction.rollback().await.ok();
return Err(FirestoreError::ErrorInTransaction(
FirestoreErrorInTransaction::new(transaction_id.clone(), Box::new(err)),
));
}
},
}
(transaction_id, transaction_span, initial_backoff_duration)
};
// We failed the first time. Now we must change the transaction mode to signal that we're retrying with the original transaction ID.
let backoff = ExponentialBackoffBuilder::new()
.with_max_elapsed_time(
options
.max_elapsed_time
// Convert to a std `Duration` and clamp any negative durations
.map(|v| v.to_std())
.transpose()?,
)
.with_initial_interval(initial_backoff_duration.unwrap_or(Duration::from_millis(
backoff::default::INITIAL_INTERVAL_MILLIS,
)))
.build();
let retry_result = retry(backoff, || async {
let options = FirestoreTransactionOptions {
mode: FirestoreTransactionMode::ReadWriteRetry(transaction_id.clone()),
..options
};
let mut transaction = self
.begin_transaction_with_options(options)
.await
.map_err(firestore_err_to_backoff)?;
let transaction_id = transaction.transaction_id().clone();
let cdb = self.clone_with_consistency_selector(
FirestoreConsistencySelector::Transaction(transaction_id.clone()),
);
let ret_val = func(cdb, &mut transaction).await.map_err(|backoff_err| {
transaction.finish().ok();
match backoff_err {
BackoffError::Transient { err, retry_after } => {
transaction_span.in_scope(|| {
warn!(%err, delay = ?retry_after, "Transient error occurred in transaction function. Retrying after the specified delay.");
});
let firestore_err = FirestoreError::ErrorInTransaction(
FirestoreErrorInTransaction::new(
transaction_id.clone(),
Box::new(err)
),
);
if let Some(retry_after_duration) = retry_after {
backoff::Error::retry_after(
firestore_err,
retry_after_duration
)
} else {
backoff::Error::transient(firestore_err)
}
}
BackoffError::Permanent(err) => {
backoff::Error::permanent(
FirestoreError::ErrorInTransaction(
FirestoreErrorInTransaction::new(
transaction_id.clone(),
Box::new(err)
),
)
)
}
}
})?;
transaction
.commit()
.await
.map_err(firestore_err_to_backoff)?;
Ok(ret_val)
})
.await;
if let Err(ref err) = retry_result {
transaction_span.in_scope(|| {
error!(
%err,
"Unable to commit transaction. Trying to roll it back.",
)
});
let options = FirestoreTransactionOptions {
mode: FirestoreTransactionMode::ReadWriteRetry(transaction_id.clone()),
..options
};
if let Ok(transaction) = self.begin_transaction_with_options(options).await {
transaction.rollback().await.ok();
}
}
retry_result
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/db/delete.rs | src/db/delete.rs | use crate::db::safe_document_path;
use crate::{FirestoreDb, FirestoreResult, FirestoreWritePrecondition};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use gcloud_sdk::google::firestore::v1::*;
use tracing::*;
#[async_trait]
/// Support for deleting single documents by id.
pub trait FirestoreDeleteSupport {
    /// Deletes the document `document_id` in `collection_id` under the
    /// database's root documents path. `precondition` (when provided) must
    /// hold for the delete to succeed.
    async fn delete_by_id<S>(
        &self,
        collection_id: &str,
        document_id: S,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<()>
    where
        S: AsRef<str> + Send;
    /// Same as [`FirestoreDeleteSupport::delete_by_id`], but under an
    /// explicit `parent` path (e.g. for sub-collections).
    async fn delete_by_id_at<S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<()>
    where
        S: AsRef<str> + Send;
}
#[async_trait]
impl FirestoreDeleteSupport for FirestoreDb {
async fn delete_by_id<S>(
&self,
collection_id: &str,
document_id: S,
precondition: Option<FirestoreWritePrecondition>,
) -> FirestoreResult<()>
where
S: AsRef<str> + Send,
{
self.delete_by_id_at(
self.get_documents_path().as_str(),
collection_id,
document_id,
precondition,
)
.await
}
    /// Deletes a document under an explicit `parent` path.
    ///
    /// The request fails if the id cannot be composed into a valid path, or
    /// if the (optional) write `precondition` is not satisfied by the server.
    /// The operation is traced, recording the round-trip time on the span.
    async fn delete_by_id_at<S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<()>
    where
        S: AsRef<str> + Send,
    {
        let document_path = safe_document_path(parent, collection_id, document_id.as_ref())?;
        let span = span!(
            Level::DEBUG,
            "Firestore Delete Document",
            "/firestore/collection_name" = collection_id,
            "/firestore/response_time" = field::Empty,
            "/firestore/document_name" = document_path.as_str(),
        );
        let request = gcloud_sdk::tonic::Request::new(DeleteDocumentRequest {
            name: document_path,
            current_document: precondition.map(|cond| cond.try_into()).transpose()?,
        });
        let begin_query_utc: DateTime<Utc> = Utc::now();
        self.client().get().delete_document(request).await?;
        let end_query_utc: DateTime<Utc> = Utc::now();
        let query_duration = end_query_utc.signed_duration_since(begin_query_utc);
        span.record(
            "/firestore/response_time",
            query_duration.num_milliseconds(),
        );
        span.in_scope(|| {
            debug!(
                collection_id,
                document_id = document_id.as_ref(),
                "Deleted a document.",
            );
        });
        Ok(())
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/serializer.rs | src/firestore_serde/serializer.rs | use crate::errors::*;
use crate::{FirestoreError, FirestoreValue};
use gcloud_sdk::google::firestore::v1::value;
use serde::Serialize;
use std::collections::HashMap;
pub struct FirestoreValueSerializer {
pub none_as_null: bool,
}
impl FirestoreValueSerializer {
pub fn new() -> Self {
Self {
none_as_null: false,
}
}
}
pub struct SerializeVec {
pub none_as_null: bool,
pub vec: Vec<gcloud_sdk::google::firestore::v1::Value>,
}
pub struct SerializeTupleVariant {
none_as_null: bool,
name: String,
vec: Vec<gcloud_sdk::google::firestore::v1::Value>,
}
pub struct SerializeMap {
none_as_null: bool,
fields: HashMap<String, gcloud_sdk::google::firestore::v1::Value>,
next_key: Option<String>,
}
pub struct SerializeStructVariant {
none_as_null: bool,
name: String,
fields: HashMap<String, gcloud_sdk::google::firestore::v1::Value>,
}
impl serde::Serializer for FirestoreValueSerializer {
type Ok = FirestoreValue;
type Error = FirestoreError;
type SerializeSeq = SerializeVec;
type SerializeTuple = SerializeVec;
type SerializeTupleStruct = SerializeVec;
type SerializeTupleVariant = SerializeTupleVariant;
type SerializeMap = SerializeMap;
type SerializeStruct = SerializeMap;
type SerializeStructVariant = SerializeStructVariant;
fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::BooleanValue(v)),
},
))
}
fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v)),
},
))
}
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v.into())),
},
))
}
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::IntegerValue(v as i64)),
},
))
}
fn serialize_f32(self, v: f32) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::DoubleValue(v.into())),
},
))
}
fn serialize_f64(self, v: f64) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::DoubleValue(v)),
},
))
}
fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::StringValue(v.to_string())),
},
))
}
fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::StringValue(v.to_string())),
},
))
}
fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::BytesValue(v.into())),
},
))
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
if self.none_as_null {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::NullValue(0)),
},
))
} else {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
}
fn serialize_some<T: ?Sized + Serialize>(self, value: &T) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
self.serialize_unit()
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.serialize_str(variant)
}
fn serialize_newtype_struct<T: ?Sized + Serialize>(
self,
name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
match name {
crate::firestore_serde::timestamp_serializers::FIRESTORE_TS_TYPE_TAG_TYPE => {
crate::firestore_serde::timestamp_serializers::serialize_timestamp_for_firestore(
value, false,
)
}
crate::firestore_serde::timestamp_serializers::FIRESTORE_TS_NULL_TYPE_TAG_TYPE => {
crate::firestore_serde::timestamp_serializers::serialize_timestamp_for_firestore(
value, true,
)
}
crate::firestore_serde::null_serializers::FIRESTORE_NULL_TYPE_TAG_TYPE => {
value.serialize(Self { none_as_null: true })
}
crate::firestore_serde::latlng_serializers::FIRESTORE_LATLNG_TYPE_TAG_TYPE => {
crate::firestore_serde::latlng_serializers::serialize_latlng_for_firestore(value)
}
crate::firestore_serde::reference_serializers::FIRESTORE_REFERENCE_TYPE_TAG_TYPE => {
crate::firestore_serde::reference_serializers::serialize_reference_for_firestore(
value, false,
)
}
crate::firestore_serde::vector_serializers::FIRESTORE_VECTOR_TYPE_TAG_TYPE => {
crate::firestore_serde::vector_serializers::serialize_vector_for_firestore(
self, value,
)
}
_ => value.serialize(self),
}
}
fn serialize_newtype_variant<T: ?Sized + Serialize>(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
let mut fields = HashMap::new();
fields.insert(String::from(variant), value.serialize(self)?.value);
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::MapValue(
gcloud_sdk::google::firestore::v1::MapValue { fields },
)),
},
))
}
fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Ok(SerializeVec {
none_as_null: self.none_as_null,
vec: Vec::with_capacity(len.unwrap_or(0)),
})
}
fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Self::Error> {
self.serialize_seq(Some(len))
}
fn serialize_tuple_struct(
self,
_name: &'static str,
len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
self.serialize_seq(Some(len))
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Ok(SerializeTupleVariant {
none_as_null: self.none_as_null,
name: String::from(variant),
vec: Vec::with_capacity(len),
})
}
fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Ok(SerializeMap {
none_as_null: self.none_as_null,
fields: HashMap::with_capacity(len.unwrap_or(0)),
next_key: None,
})
}
fn serialize_struct(
self,
_name: &'static str,
len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
self.serialize_map(Some(len))
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Ok(SerializeStructVariant {
none_as_null: self.none_as_null,
name: String::from(variant),
fields: HashMap::with_capacity(len),
})
}
}
impl serde::ser::SerializeSeq for SerializeVec {
    type Ok = FirestoreValue;
    type Error = FirestoreError;

    /// Serializes one element; elements that produced no Firestore value
    /// (suppressed `None`s) are dropped from the resulting array.
    fn serialize_element<T: ?Sized + Serialize>(&mut self, value: &T) -> Result<(), Self::Error> {
        let element_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        let element = value.serialize(element_serializer)?.value;
        if element.value_type.is_some() {
            self.vec.push(element);
        }
        Ok(())
    }

    fn end(self) -> Result<Self::Ok, Self::Error> {
        let array = gcloud_sdk::google::firestore::v1::ArrayValue { values: self.vec };
        Ok(FirestoreValue::from(
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::ArrayValue(array)),
            },
        ))
    }
}
// Tuples are encoded exactly like sequences (Firestore arrays), so both
// methods delegate to the `SerializeSeq` implementation above.
impl serde::ser::SerializeTuple for SerializeVec {
    type Ok = FirestoreValue;
    type Error = FirestoreError;
    fn serialize_element<T: ?Sized + Serialize>(&mut self, value: &T) -> Result<(), Self::Error> {
        serde::ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        serde::ser::SerializeSeq::end(self)
    }
}
// Tuple structs are also encoded as Firestore arrays; delegate to
// the `SerializeSeq` implementation.
impl serde::ser::SerializeTupleStruct for SerializeVec {
    type Ok = FirestoreValue;
    type Error = FirestoreError;
    fn serialize_field<T: ?Sized + Serialize>(&mut self, value: &T) -> Result<(), Self::Error> {
        serde::ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        serde::ser::SerializeSeq::end(self)
    }
}
impl serde::ser::SerializeTupleVariant for SerializeTupleVariant {
    type Ok = FirestoreValue;
    type Error = FirestoreError;

    fn serialize_field<T: ?Sized + Serialize>(&mut self, value: &T) -> Result<(), Self::Error> {
        let field_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        let field = value.serialize(field_serializer)?.value;
        // Fields that serialized to nothing (suppressed `None`s) are skipped.
        if field.value_type.is_some() {
            self.vec.push(field);
        }
        Ok(())
    }

    fn end(self) -> Result<Self::Ok, Self::Error> {
        // The variant becomes a single-entry map: { variant_name: [fields...] }.
        let array_value = gcloud_sdk::google::firestore::v1::Value {
            value_type: Some(value::ValueType::ArrayValue(
                gcloud_sdk::google::firestore::v1::ArrayValue { values: self.vec },
            )),
        };
        let fields: HashMap<_, _> = std::iter::once((self.name, array_value)).collect();
        Ok(FirestoreValue::from(
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::MapValue(
                    gcloud_sdk::google::firestore::v1::MapValue { fields },
                )),
            },
        ))
    }
}
impl serde::ser::SerializeMap for SerializeMap {
    type Ok = FirestoreValue;
    type Error = FirestoreError;

    /// Buffers the map key for the following `serialize_value` call.
    /// Firestore map keys must be strings; integer keys are converted.
    fn serialize_key<T: ?Sized + Serialize>(&mut self, key: &T) -> Result<(), Self::Error> {
        let key_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        self.next_key = match key.serialize(key_serializer)?.value.value_type {
            Some(value::ValueType::StringValue(str_key)) => Some(str_key),
            Some(value::ValueType::IntegerValue(int_key)) => Some(int_key.to_string()),
            _ => {
                return Err(FirestoreError::SerializeError(
                    FirestoreSerializationError::from_message(
                        "Map key should be a string format",
                    ),
                ))
            }
        };
        Ok(())
    }

    fn serialize_value<T: ?Sized + Serialize>(&mut self, value: &T) -> Result<(), Self::Error> {
        let key = self.next_key.take().ok_or_else(|| {
            FirestoreError::SerializeError(FirestoreSerializationError::from_message(
                "Unexpected map value without key",
            ))
        })?;
        let value_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        let serialized = value.serialize(value_serializer)?.value;
        // Entries whose value serialized to nothing are omitted.
        if serialized.value_type.is_some() {
            self.fields.insert(key, serialized);
        }
        Ok(())
    }

    fn end(self) -> Result<Self::Ok, Self::Error> {
        let map = gcloud_sdk::google::firestore::v1::MapValue {
            fields: self.fields,
        };
        Ok(FirestoreValue::from(
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::MapValue(map)),
            },
        ))
    }
}
impl serde::ser::SerializeStruct for SerializeMap {
    type Ok = FirestoreValue;
    type Error = FirestoreError;

    fn serialize_field<T: ?Sized + Serialize>(
        &mut self,
        key: &'static str,
        value: &T,
    ) -> Result<(), Self::Error> {
        let field_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        let serialized = value.serialize(field_serializer)?.value;
        // Struct fields that serialized to nothing are omitted from the map.
        if serialized.value_type.is_some() {
            self.fields.insert(key.to_string(), serialized);
        }
        Ok(())
    }

    fn end(self) -> Result<Self::Ok, Self::Error> {
        let map = gcloud_sdk::google::firestore::v1::MapValue {
            fields: self.fields,
        };
        Ok(FirestoreValue::from(
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::MapValue(map)),
            },
        ))
    }
}
impl serde::ser::SerializeStructVariant for SerializeStructVariant {
    type Ok = FirestoreValue;
    type Error = FirestoreError;

    fn serialize_field<T: ?Sized + Serialize>(
        &mut self,
        key: &'static str,
        value: &T,
    ) -> Result<(), Self::Error> {
        let field_serializer = FirestoreValueSerializer {
            none_as_null: self.none_as_null,
        };
        let serialized = value.serialize(field_serializer)?.value;
        // Fields that serialized to nothing are omitted.
        if serialized.value_type.is_some() {
            self.fields.insert(key.to_string(), serialized);
        }
        Ok(())
    }

    fn end(self) -> Result<Self::Ok, Self::Error> {
        // Nest the collected fields under the variant name:
        // { variant_name: { field: value, ... } }.
        let inner = gcloud_sdk::google::firestore::v1::Value {
            value_type: Some(value::ValueType::MapValue(
                gcloud_sdk::google::firestore::v1::MapValue {
                    fields: self.fields,
                },
            )),
        };
        let object: HashMap<_, _> = std::iter::once((self.name, inner)).collect();
        Ok(FirestoreValue::from(
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::MapValue(
                    gcloud_sdk::google::firestore::v1::MapValue { fields: object },
                )),
            },
        ))
    }
}
/// Serializes `object` into a Firestore `Document` rooted at `document_path`.
///
/// The top-level value must serialize to a map; anything else yields a
/// system error.
pub fn firestore_document_from_serializable<S, T>(
    document_path: S,
    object: &T,
) -> Result<gcloud_sdk::google::firestore::v1::Document, FirestoreError>
where
    S: AsRef<str>,
    T: Serialize,
{
    let serialized = object
        .serialize(crate::firestore_serde::serializer::FirestoreValueSerializer {
            none_as_null: false,
        })
        .map_err(|err| {
            // Attach the document path to serialization errors for debugging.
            if let FirestoreError::SerializeError(details) = err {
                FirestoreError::SerializeError(
                    details.with_document_path(document_path.as_ref().to_string()),
                )
            } else {
                err
            }
        })?;
    if let Some(value::ValueType::MapValue(map_value)) = serialized.value.value_type {
        Ok(gcloud_sdk::google::firestore::v1::Document {
            fields: map_value.fields,
            name: document_path.as_ref().to_string(),
            ..Default::default()
        })
    } else {
        Err(FirestoreError::SystemError(FirestoreSystemError::new(
            FirestoreErrorPublicGenericDetails::new("SystemError".into()),
            "Unable to create document from value. No object found".into(),
        )))
    }
}
/// Builds a Firestore `Document` at `document_path` from `(name, value)`
/// pairs; later duplicate names overwrite earlier ones.
pub fn firestore_document_from_map<S, I, IS>(
    document_path: S,
    fields: I,
) -> Result<gcloud_sdk::google::firestore::v1::Document, FirestoreError>
where
    S: AsRef<str>,
    I: IntoIterator<Item = (IS, FirestoreValue)>,
    IS: AsRef<str>,
{
    let mut fields_map: HashMap<String, gcloud_sdk::google::firestore::v1::Value> = HashMap::new();
    for (field_name, field_value) in fields {
        fields_map.insert(field_name.as_ref().to_string(), field_value.value);
    }
    Ok(gcloud_sdk::google::firestore::v1::Document {
        fields: fields_map,
        name: document_path.as_ref().to_string(),
        ..Default::default()
    })
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/reference_serializers.rs | src/firestore_serde/reference_serializers.rs | use gcloud_sdk::google::firestore::v1::value;
use serde::{Deserialize, Serialize, Serializer};
use crate::db::split_document_path;
use crate::errors::*;
use crate::FirestoreValue;
pub(crate) const FIRESTORE_REFERENCE_TYPE_TAG_TYPE: &str = "FirestoreReference";
/// Newtype wrapper holding a full Firestore document reference path
/// (e.g. "projects/{p}/databases/{db}/documents/{collection}/{doc}").
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash, Default)]
pub struct FirestoreReference(pub String);
impl FirestoreReference {
    /// Creates a new reference
    pub fn new(reference: String) -> Self {
        FirestoreReference(reference)
    }
    /// Returns the reference as a string
    pub fn as_str(&self) -> &str {
        &self.0
    }
    /// Splits the reference into parent path, collection name and document id
    /// Returns (parent_path, collection_name, document_id)
    pub fn split(&self, document_path: &str) -> (Option<String>, String, String) {
        // Separate "<parent>/<doc-id>" into the parent path and the id.
        let (parent_raw_path, document_id) = split_document_path(self.as_str());
        // Strip the database root prefix so the remainder is relative to
        // the documents root.
        // NOTE(review): `replace` removes *all* occurrences of
        // "{document_path}/"; assumes the root path never repeats inside
        // the reference — confirm.
        let parent_path = parent_raw_path.replace(format!("{document_path}/").as_str(), "");
        // The last '/'-separated segment is the collection name; everything
        // before it (if any) is the parent document path.
        let split_pos = parent_path.rfind('/').map(|pos| pos + 1).unwrap_or(0);
        if split_pos == 0 {
            // Top-level collection: there is no parent document.
            (None, parent_path, document_id.to_string())
        } else {
            (
                Some(parent_path[..split_pos - 1].to_string()),
                parent_path[split_pos..].to_string(),
                document_id.to_string(),
            )
        }
    }
}
/// `#[serde(with = "serialize_as_reference")]` helpers: serialize a `String`
/// field as a Firestore ReferenceValue (via the newtype type-tag routing in
/// the main serializer) and deserialize it back as a plain string.
pub mod serialize_as_reference {
    use serde::{Deserialize, Deserializer, Serializer};
    pub fn serialize<S>(str: &String, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // The newtype-struct name acts as a type tag recognized by
        // FirestoreValueSerializer::serialize_newtype_struct.
        serializer.serialize_newtype_struct(
            crate::firestore_serde::FIRESTORE_REFERENCE_TYPE_TAG_TYPE,
            &str,
        )
    }
    pub fn deserialize<'de, D>(deserializer: D) -> Result<String, D::Error>
    where
        D: Deserializer<'de>,
    {
        String::deserialize(deserializer)
    }
}
pub fn serialize_reference_for_firestore<T: ?Sized + Serialize>(
value: &T,
none_as_null: bool,
) -> Result<FirestoreValue, FirestoreError> {
struct ReferenceSerializer {
none_as_null: bool,
}
impl Serializer for ReferenceSerializer {
type Ok = FirestoreValue;
type Error = FirestoreError;
type SerializeSeq = crate::firestore_serde::serializer::SerializeVec;
type SerializeTuple = crate::firestore_serde::serializer::SerializeVec;
type SerializeTupleStruct = crate::firestore_serde::serializer::SerializeVec;
type SerializeTupleVariant = crate::firestore_serde::serializer::SerializeTupleVariant;
type SerializeMap = crate::firestore_serde::serializer::SerializeMap;
type SerializeStruct = crate::firestore_serde::serializer::SerializeMap;
type SerializeStructVariant = crate::firestore_serde::serializer::SerializeStructVariant;
fn serialize_bool(self, _v: bool) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_i8(self, _v: i8) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_i16(self, _v: i16) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_i32(self, _v: i32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_i64(self, _v: i64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_u8(self, _v: u8) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_u16(self, _v: u16) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_u32(self, _v: u32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_u64(self, _v: u64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_f32(self, _v: f32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_f64(self, _v: f64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_char(self, _v: char) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::ReferenceValue(v.to_string())),
},
))
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
if self.none_as_null {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::NullValue(0)),
},
))
} else {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
}
fn serialize_some<T: ?Sized + Serialize>(self, value: &T) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
self.serialize_unit()
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.serialize_str(variant)
}
fn serialize_newtype_struct<T: ?Sized + Serialize>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized + Serialize>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Reference serializer doesn't support this type",
),
))
}
}
value.serialize(ReferenceSerializer { none_as_null })
}
#[cfg(test)]
mod tests {
    use super::*;
    // Verifies that `split` separates a nested (sub-collection) reference
    // into parent path, collection name and document id.
    #[test]
    fn test_reference_split() {
        let reference = FirestoreReference::new(
            "projects/test-project/databases/(default)/documents/test-collection/test-document-id/child-collection/child-document-id"
                .to_string(),
        );
        let (parent_path, collection_name, document_id) =
            reference.split("projects/test-project/databases/(default)/documents");
        assert_eq!(
            parent_path,
            Some("test-collection/test-document-id".to_string())
        );
        assert_eq!(collection_name, "child-collection");
        assert_eq!(document_id, "child-document-id");
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/mod.rs | src/firestore_serde/mod.rs | //! Provides custom Serde serializers and deserializers for converting between
//! Rust types and Firestore's native data representation.
//!
//! This module is central to enabling idiomatic Rust struct usage with Firestore.
//! It handles the mapping of Rust types (like `String`, `i64`, `bool`, `Vec`, `HashMap`,
//! and custom structs) to Firestore's `Value` protobuf message, and vice-versa.
//!
//! Key components:
//! - [`FirestoreValueSerializer`](serializer::FirestoreValueSerializer): Implements `serde::Serializer`
//! to convert Rust types into [`FirestoreValue`](crate::FirestoreValue).
//! - [`FirestoreValueDeserializer`](deserializer::FirestoreValueDeserializer): Implements `serde::Deserializer`
//! to convert [`FirestoreValue`](crate::FirestoreValue) back into Rust types.
//! - Helper modules (e.g., `timestamp_serializers`, `latlng_serializers`) provide
//! `#[serde(with = "...")]` compatible functions for specific Firestore types like
//! Timestamps and GeoPoints.
//!
//! The primary public functions re-exported here are:
//! - [`firestore_document_to_serializable`]: Deserializes a Firestore document into a Rust struct.
//! - [`firestore_document_from_serializable`]: Serializes a Rust struct into a Firestore document.
//! - [`firestore_document_from_map`]: Creates a Firestore document from a map of field names to `FirestoreValue`s.
//!
//! Additionally, a generic `From<T> for FirestoreValue where T: serde::Serialize`
//! implementation is provided, allowing easy conversion of any serializable Rust type
//! into a `FirestoreValue`.
mod deserializer;
mod serializer;
/// Provides `#[serde(with = "...")]` serializers and deserializers for Firestore Timestamps
/// (converting between `chrono::DateTime<Utc>` and `google::protobuf::Timestamp`).
mod timestamp_serializers;
pub use timestamp_serializers::*;
/// Provides `#[serde(with = "...")]` serializers and deserializers for Firestore Null values,
/// particularly for handling `Option<T>` where `None` maps to a NullValue.
mod null_serializers;
pub use null_serializers::*;
/// Provides `#[serde(with = "...")]` serializers and deserializers for Firestore GeoPoint values
/// (converting between a suitable Rust type like a struct with `latitude` and `longitude`
/// fields and `google::type::LatLng`).
mod latlng_serializers;
pub use latlng_serializers::*;
/// Provides `#[serde(with = "...")]` serializers and deserializers for Firestore DocumentReference values
/// (converting between `String` or a custom `FirestoreReference` type and Firestore's reference format).
mod reference_serializers;
pub use reference_serializers::*;
/// Provides `#[serde(with = "...")]` serializers and deserializers for Firestore Vector values.
mod vector_serializers;
pub use vector_serializers::*;
use crate::FirestoreValue;
use gcloud_sdk::google::firestore::v1::Value;
pub use deserializer::firestore_document_to_serializable;
pub use serializer::firestore_document_from_map;
pub use serializer::firestore_document_from_serializable;
/// Generic conversion from any `serde::Serialize` type into a [`FirestoreValue`].
///
/// This allows for convenient creation of `FirestoreValue` instances from various Rust types
/// using `.into()`. If serialization fails (which is rare for well-behaved `Serialize`
/// implementations), it defaults to a `FirestoreValue` representing a "null" or empty value.
///
/// # Examples
/// ```rust
/// use firestore::FirestoreValue;
///
/// let fv_string: FirestoreValue = "hello".into();
/// let fv_int: FirestoreValue = 42.into();
/// let fv_bool: FirestoreValue = true.into();
///
/// // Assuming MyStruct implements serde::Serialize
/// // struct MyStruct { field: String }
/// // let my_struct = MyStruct { field: "test".to_string() };
/// // let fv_struct: FirestoreValue = my_struct.into();
/// ```
impl<T> std::convert::From<T> for FirestoreValue
where
    T: serde::Serialize,
{
    // Blanket conversion: serialize with the default serializer
    // (none_as_null = false). Errors are swallowed and mapped to an empty
    // value — see the note below.
    fn from(value: T) -> Self {
        let serializer = crate::firestore_serde::serializer::FirestoreValueSerializer::new();
        value.serialize(serializer).unwrap_or_else(|_err| {
            // It's generally better to panic or return a Result here if serialization
            // is critical and failure indicates a programming error.
            // However, matching existing behavior of defaulting to None/Null.
            // Consider logging the error: eprintln!("Failed to serialize to FirestoreValue: {}", err);
            FirestoreValue::from(Value { value_type: None })
        })
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/vector_serializers.rs | src/firestore_serde/vector_serializers.rs | use crate::errors::FirestoreError;
use crate::firestore_serde::serializer::FirestoreValueSerializer;
use crate::FirestoreValue;
use serde::de::{MapAccess, Visitor};
use serde::{Deserializer, Serialize};
pub(crate) const FIRESTORE_VECTOR_TYPE_TAG_TYPE: &str = "FirestoreVector";
/// Newtype wrapper marking a `Vec<f64>` as a Firestore vector value
/// (encoded on the wire as a map tagged with "__type__": "__vector__").
#[derive(Serialize, Clone, Debug, PartialEq, PartialOrd, Default)]
pub struct FirestoreVector(pub Vec<f64>);
impl FirestoreVector {
pub fn new(vec: Vec<f64>) -> Self {
FirestoreVector(vec)
}
pub fn into_vec(self) -> Vec<f64> {
self.0
}
pub fn as_vec(&self) -> &Vec<f64> {
&self.0
}
}
// Allows `Vec<f64>::from(vector)` / `.into()` to unwrap the newtype.
impl From<FirestoreVector> for Vec<f64> {
    fn from(val: FirestoreVector) -> Self {
        val.into_vec()
    }
}
// Builds a FirestoreVector from any iterable of f64 (arrays, vecs, iterators).
impl<I> From<I> for FirestoreVector
where
    I: IntoIterator<Item = f64>,
{
    fn from(vec: I) -> Self {
        FirestoreVector(vec.into_iter().collect())
    }
}
/// Serializes `value` (expected to serialize to an array of doubles) into
/// the Firestore vector wire format:
/// `{ "__type__": "__vector__", "value": [...] }`.
pub fn serialize_vector_for_firestore<T: ?Sized + Serialize>(
    firestore_value_serializer: FirestoreValueSerializer,
    value: &T,
) -> Result<FirestoreValue, FirestoreError> {
    use gcloud_sdk::google::firestore::v1 as fsv1;

    let serialized_array = value.serialize(firestore_value_serializer)?;

    let mut fields = std::collections::HashMap::new();
    fields.insert(
        "__type__".to_string(),
        fsv1::Value {
            value_type: Some(fsv1::value::ValueType::StringValue(
                "__vector__".to_string(),
            )),
        },
    );
    fields.insert("value".to_string(), serialized_array.value);

    Ok(FirestoreValue::from(fsv1::Value {
        value_type: Some(fsv1::value::ValueType::MapValue(fsv1::MapValue { fields })),
    }))
}
/// Serde visitor that accepts both representations of a vector.
struct FirestoreVectorVisitor;
impl<'de> Visitor<'de> for FirestoreVectorVisitor {
    type Value = FirestoreVector;
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a FirestoreVector")
    }
    // Plain sequence form: just a list of doubles.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: serde::de::SeqAccess<'de>,
    {
        let mut vec = Vec::new();
        while let Some(value) = seq.next_element()? {
            vec.push(value);
        }
        Ok(FirestoreVector(vec))
    }
    // Firestore wire form: { "__type__": "__vector__", "value": [...] }.
    // Returns as soon as "value" is seen; "__type__" is validated only if
    // it appears before "value".
    fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
    where
        A: MapAccess<'de>,
    {
        while let Some(field) = map.next_key::<String>()? {
            match field.as_str() {
                "__type__" => {
                    let value = map.next_value::<String>()?;
                    if value != "__vector__" {
                        return Err(serde::de::Error::custom(
                            "Expected __vector__ for FirestoreVector",
                        ));
                    }
                }
                "value" => {
                    let value = map.next_value::<Vec<f64>>()?;
                    return Ok(FirestoreVector(value));
                }
                _ => {
                    return Err(serde::de::Error::custom(
                        "Unknown field for FirestoreVector",
                    ));
                }
            }
        }
        // Reached the end of the map without a "value" entry.
        Err(serde::de::Error::custom(
            "Unknown structure for FirestoreVector",
        ))
    }
}
impl<'de> serde::Deserialize<'de> for FirestoreVector {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `deserialize_any` lets the visitor accept either a sequence or
        // the tagged-map wire form.
        deserializer.deserialize_any(FirestoreVectorVisitor)
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/timestamp_serializers.rs | src/firestore_serde/timestamp_serializers.rs | use chrono::prelude::*;
use gcloud_sdk::google::firestore::v1::value;
use serde::{Deserialize, Serialize, Serializer};
use crate::{
errors::FirestoreSerializationError, timestamp_utils::to_timestamp, FirestoreError,
FirestoreValue,
};
/// Newtype wrapper marking a `DateTime<Utc>` for serialization as a native
/// Firestore timestamp (rather than a string).
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, PartialOrd, Default)]
pub struct FirestoreTimestamp(pub DateTime<Utc>);
// Convenience conversion from a chrono UTC datetime.
impl From<DateTime<Utc>> for FirestoreTimestamp {
    fn from(dt: DateTime<Utc>) -> Self {
        FirestoreTimestamp(dt)
    }
}
pub(crate) const FIRESTORE_TS_TYPE_TAG_TYPE: &str = "FirestoreTimestamp";
pub(crate) const FIRESTORE_TS_NULL_TYPE_TAG_TYPE: &str = "FirestoreTimestampAsNull";
/// `#[serde(with = "serialize_as_timestamp")]` helpers: serialize a
/// `DateTime<Utc>` field as a native Firestore timestamp via the newtype
/// type-tag routing in the main serializer.
pub mod serialize_as_timestamp {
    use chrono::{DateTime, Utc};
    use serde::{Deserialize, Deserializer, Serializer};
    pub fn serialize<S>(date: &DateTime<Utc>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // The newtype-struct name acts as a type tag recognized by
        // FirestoreValueSerializer::serialize_newtype_struct.
        serializer
            .serialize_newtype_struct(crate::firestore_serde::FIRESTORE_TS_TYPE_TAG_TYPE, &date)
    }
    pub fn deserialize<'de, D>(deserializer: D) -> Result<DateTime<Utc>, D::Error>
    where
        D: Deserializer<'de>,
    {
        DateTime::<Utc>::deserialize(deserializer)
    }
}
/// Like `serialize_as_timestamp`, but for `Option<DateTime<Utc>>` fields:
/// `Some` is tagged and written as a Firestore timestamp, `None` is written
/// via `serialize_none` (i.e. omitted rather than stored as a null).
pub mod serialize_as_optional_timestamp {
    use chrono::{DateTime, Utc};
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(date: &Option<DateTime<Utc>>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match date {
            Some(v) => serializer
                .serialize_newtype_struct(crate::firestore_serde::FIRESTORE_TS_TYPE_TAG_TYPE, v),
            None => serializer.serialize_none(),
        }
    }

    /// Standard optional deserialization; absent/null becomes `None`.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<DateTime<Utc>>, D::Error>
    where
        D: Deserializer<'de>,
    {
        Option::<DateTime<Utc>>::deserialize(deserializer)
    }
}
/// Like `serialize_as_optional_timestamp`, but the whole option is wrapped in
/// the `FirestoreTimestampAsNull` tag, so a `None` can be emitted as an
/// explicit Firestore null instead of being omitted.
pub mod serialize_as_null_timestamp {
    use chrono::{DateTime, Utc};
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(date: &Option<DateTime<Utc>>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_newtype_struct(
            crate::firestore_serde::FIRESTORE_TS_NULL_TYPE_TAG_TYPE,
            date,
        )
    }

    /// Standard optional deserialization; nulls become `None`.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<DateTime<Utc>>, D::Error>
    where
        D: Deserializer<'de>,
    {
        Option::<DateTime<Utc>>::deserialize(deserializer)
    }
}
pub fn serialize_timestamp_for_firestore<T: ?Sized + Serialize>(
value: &T,
none_as_null: bool,
) -> Result<FirestoreValue, FirestoreError> {
struct TimestampSerializer {
none_as_null: bool,
}
impl Serializer for TimestampSerializer {
type Ok = FirestoreValue;
type Error = FirestoreError;
type SerializeSeq = crate::firestore_serde::serializer::SerializeVec;
type SerializeTuple = crate::firestore_serde::serializer::SerializeVec;
type SerializeTupleStruct = crate::firestore_serde::serializer::SerializeVec;
type SerializeTupleVariant = crate::firestore_serde::serializer::SerializeTupleVariant;
type SerializeMap = crate::firestore_serde::serializer::SerializeMap;
type SerializeStruct = crate::firestore_serde::serializer::SerializeMap;
type SerializeStructVariant = crate::firestore_serde::serializer::SerializeStructVariant;
fn serialize_bool(self, _v: bool) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_i8(self, _v: i8) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_i16(self, _v: i16) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_i32(self, _v: i32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_i64(self, _v: i64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_u8(self, _v: u8) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_u16(self, _v: u16) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_u32(self, _v: u32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_u64(self, _v: u64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_f32(self, _v: f32) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_f64(self, _v: f64) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_char(self, _v: char) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
let dt = v.parse::<DateTime<Utc>>()?;
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::TimestampValue(to_timestamp(dt))),
},
))
}
fn serialize_bytes(self, _v: &[u8]) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
if self.none_as_null {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(value::ValueType::NullValue(0)),
},
))
} else {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
}
fn serialize_some<T: ?Sized + Serialize>(self, value: &T) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
Ok(FirestoreValue::from(
gcloud_sdk::google::firestore::v1::Value { value_type: None },
))
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
self.serialize_unit()
}
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
self.serialize_str(variant)
}
fn serialize_newtype_struct<T: ?Sized + Serialize>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error> {
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized + Serialize>(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
Err(FirestoreError::SerializeError(
FirestoreSerializationError::from_message(
"Timestamp serializer doesn't support this type",
),
))
}
}
value.serialize(TimestampSerializer { none_as_null })
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/deserializer.rs | src/firestore_serde/deserializer.rs | use crate::errors::FirestoreSerializationError;
use crate::timestamp_utils::from_timestamp;
use crate::{FirestoreError, FirestoreValue};
use gcloud_sdk::google::firestore::v1::value;
use serde::de::{DeserializeSeed, Visitor};
use serde::Deserialize;
use std::collections::HashMap;
use std::fmt::Formatter;
/// Captures any self-describing serde input as a raw Firestore protobuf
/// `Value` tree: scalars map to the corresponding Firestore value types,
/// sequences become `ArrayValue`s, and maps become `MapValue`s (built
/// recursively, since every element is itself deserialized as
/// `FirestoreValue`).
impl<'de> Deserialize<'de> for FirestoreValue {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<FirestoreValue, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct ValueVisitor;
        impl<'de> Visitor<'de> for ValueVisitor {
            type Value = FirestoreValue;
            fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result {
                formatter.write_str("any valid value")
            }
            #[inline]
            fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::BooleanValue(value)),
                    },
                ))
            }
            #[inline]
            fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::IntegerValue(value)),
                    },
                ))
            }
            // NOTE(review): u64 is narrowed with `as i64` — values above
            // i64::MAX wrap to negative; confirm acceptable for Firestore's
            // signed integer type.
            #[inline]
            fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::IntegerValue(value as i64)),
                    },
                ))
            }
            #[inline]
            fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::DoubleValue(value)),
                    },
                ))
            }
            #[inline]
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                self.visit_string(String::from(value))
            }
            #[inline]
            fn visit_string<E>(self, value: String) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::StringValue(value)),
                    },
                ))
            }
            // `None` and unit both map to a value with no `value_type` set.
            #[inline]
            fn visit_none<E>(self) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value { value_type: None },
                ))
            }
            #[inline]
            fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
            where
                D: serde::Deserializer<'de>,
            {
                Deserialize::deserialize(deserializer)
            }
            #[inline]
            fn visit_unit<E>(self) -> Result<Self::Value, E> {
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value { value_type: None },
                ))
            }
            // Each element is itself deserialized as a FirestoreValue, then
            // unwrapped to the underlying protobuf value.
            #[inline]
            fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
            where
                V: serde::de::SeqAccess<'de>,
            {
                let mut vec: Vec<gcloud_sdk::google::firestore::v1::Value> = Vec::new();
                while let Some(elem) = visitor.next_element::<Self::Value>()? {
                    vec.push(elem.value);
                }
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::ArrayValue(
                            gcloud_sdk::google::firestore::v1::ArrayValue { values: vec },
                        )),
                    },
                ))
            }
            fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
            where
                V: serde::de::MapAccess<'de>,
            {
                let mut values = HashMap::new();
                while let Some((key, value)) = visitor.next_entry::<String, Self::Value>()? {
                    values.insert(key, value.value);
                }
                Ok(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::MapValue(
                            gcloud_sdk::google::firestore::v1::MapValue { fields: values },
                        )),
                    },
                ))
            }
        }
        deserializer.deserialize_any(ValueVisitor)
    }
}
/// `SeqAccess` that replays a Firestore `ArrayValue`'s elements to a
/// deserializer; each protobuf value is wrapped as a `FirestoreValue` so it
/// can itself act as a `Deserializer` for the element seed.
struct FirestoreValueSeqAccess {
    iter: std::vec::IntoIter<FirestoreValue>,
}
impl FirestoreValueSeqAccess {
    fn new(vec: Vec<gcloud_sdk::google::firestore::v1::Value>) -> Self {
        FirestoreValueSeqAccess {
            iter: vec
                .into_iter()
                .map(FirestoreValue::from)
                .collect::<Vec<FirestoreValue>>()
                .into_iter(),
        }
    }
}
impl<'de> serde::de::SeqAccess<'de> for FirestoreValueSeqAccess {
    type Error = FirestoreError;
    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        match self.iter.next() {
            Some(value) => seed.deserialize(value).map(Some),
            None => Ok(None),
        }
    }
    // Report an exact size only when the iterator's bounds agree.
    fn size_hint(&self) -> Option<usize> {
        match self.iter.size_hint() {
            (lower, Some(upper)) if lower == upper => Some(upper),
            _ => None,
        }
    }
}
/// `MapAccess` that replays a Firestore `MapValue`'s entries to a
/// deserializer. Keys are fed to the key seed as `StringValue`s; the value
/// for the current entry is stashed in `value` between the key and value
/// calls, per the serde MapAccess protocol.
struct FirestoreValueMapAccess {
    iter: <HashMap<String, FirestoreValue> as IntoIterator>::IntoIter,
    value: Option<FirestoreValue>,
}
impl FirestoreValueMapAccess {
    fn new(map: HashMap<String, gcloud_sdk::google::firestore::v1::Value>) -> Self {
        FirestoreValueMapAccess {
            iter: map
                .into_iter()
                .map(|(k, v)| (k, FirestoreValue::from(v)))
                .collect::<HashMap<String, FirestoreValue>>()
                .into_iter(),
            value: None,
        }
    }
}
impl<'de> serde::de::MapAccess<'de> for FirestoreValueMapAccess {
    type Error = FirestoreError;
    fn next_key_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        match self.iter.next() {
            Some((key, value)) => {
                // Hold the value until next_value_seed is called.
                self.value = Some(value);
                seed.deserialize(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::StringValue(key)),
                    },
                ))
                .map(Some)
            }
            None => Ok(None),
        }
    }
    fn next_value_seed<T>(&mut self, seed: T) -> Result<T::Value, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        // Error here means the caller violated the key-then-value protocol.
        match self.value.take() {
            Some(value) => seed.deserialize(value),
            None => Err(serde::de::Error::custom("value is missing")),
        }
    }
    fn size_hint(&self) -> Option<usize> {
        match self.iter.size_hint() {
            (lower, Some(upper)) if lower == upper => Some(upper),
            _ => None,
        }
    }
}
/// `EnumAccess` over a Firestore value: externally-tagged enums arrive either
/// as a single-entry map `{ "Variant": payload }` (data-carrying variants) or
/// as a bare string (unit variants).
#[derive(Debug, PartialEq, Clone)]
struct FirestoreVariantAccess {
    de: FirestoreValue,
}
impl FirestoreVariantAccess {
    fn new(de: FirestoreValue) -> Self {
        Self { de }
    }
}
impl<'de> serde::de::EnumAccess<'de> for FirestoreVariantAccess {
    type Error = FirestoreError;
    type Variant = FirestoreValue;
    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
    where
        V: DeserializeSeed<'de>,
    {
        match self.de.value.value_type.clone() {
            Some(value::ValueType::MapValue(v)) => {
                // NOTE(review): only the first entry is inspected; for maps
                // with more than one entry the pick depends on HashMap
                // iteration order — presumably enums always serialize to a
                // single-entry map.
                if let Some((k, v)) = v.fields.iter().next() {
                    let variant = seed.deserialize(FirestoreValue::from(
                        gcloud_sdk::google::firestore::v1::Value {
                            value_type: Some(value::ValueType::StringValue(k.clone())),
                        },
                    ))?;
                    Ok((variant, FirestoreValue::from(v.clone())))
                } else {
                    Err(FirestoreError::DeserializeError(
                        FirestoreSerializationError::from_message(format!(
                            "Unexpected enum empty map type: {:?}",
                            self.de.value.value_type
                        )),
                    ))
                }
            }
            Some(value::ValueType::StringValue(v)) => {
                // Unit variant: the string itself is the variant name and
                // also serves as the (unused) payload.
                let variant = seed.deserialize(FirestoreValue::from(
                    gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::StringValue(v)),
                    },
                ))?;
                Ok((variant, self.de))
            }
            _ => Err(FirestoreError::DeserializeError(
                FirestoreSerializationError::from_message(format!(
                    "Unexpected enum type: {:?}",
                    self.de.value.value_type
                )),
            )),
        }
    }
}
/// Payload side of enum deserialization: the value picked by
/// `FirestoreVariantAccess::variant_seed` acts as the variant's content.
impl<'de> serde::de::VariantAccess<'de> for FirestoreValue {
    type Error = FirestoreError;
    // Unit variants carry no payload; the value is simply discarded.
    fn unit_variant(self) -> Result<(), Self::Error> {
        Ok(())
    }
    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        seed.deserialize(self)
    }
    // Tuple variants must be backed by an ArrayValue.
    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.value.value_type {
            Some(value::ValueType::ArrayValue(v)) => {
                visitor.visit_seq(FirestoreValueSeqAccess::new(v.values))
            }
            _ => Err(FirestoreError::DeserializeError(
                FirestoreSerializationError::from_message(
                    "Unexpected tuple_variant for variant access",
                ),
            )),
        }
    }
    // Struct variants are not supported by this deserializer.
    fn struct_variant<V>(
        self,
        _fields: &'static [&'static str],
        _visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        Err(FirestoreError::DeserializeError(
            FirestoreSerializationError::from_message(
                "Unexpected struct_variant for variant access",
            ),
        ))
    }
}
impl<'de> serde::Deserializer<'de> for FirestoreValue {
type Error = FirestoreError;
#[inline]
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
match self.value.value_type {
Some(value::ValueType::NullValue(_)) => visitor.visit_unit(),
Some(value::ValueType::BooleanValue(v)) => visitor.visit_bool(v),
Some(value::ValueType::IntegerValue(v)) => visitor.visit_i64(v),
Some(value::ValueType::StringValue(v)) => visitor.visit_string(v),
Some(value::ValueType::ArrayValue(v)) => {
visitor.visit_seq(FirestoreValueSeqAccess::new(v.values))
}
Some(value::ValueType::MapValue(v)) => {
visitor.visit_map(FirestoreValueMapAccess::new(v.fields))
}
Some(value::ValueType::DoubleValue(v)) => visitor.visit_f64(v),
Some(value::ValueType::BytesValue(ref v)) => visitor.visit_bytes(v),
Some(value::ValueType::ReferenceValue(v)) => visitor.visit_string(v),
Some(value::ValueType::GeoPointValue(v)) => {
let lat_lng_fields: HashMap<String, gcloud_sdk::google::firestore::v1::Value> =
vec![
("latitude".to_string(), gcloud_sdk::google::firestore::v1::Value {
value_type: Some(gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(v.latitude))
}),
("longitude".to_string(),
gcloud_sdk::google::firestore::v1::Value {
value_type: Some(gcloud_sdk::google::firestore::v1::value::ValueType::DoubleValue(v.longitude))
}),
]
.into_iter()
.collect();
visitor.visit_map(FirestoreValueMapAccess::new(lat_lng_fields))
}
Some(value::ValueType::TimestampValue(ts)) => {
visitor.visit_string(from_timestamp(ts)?.to_rfc3339())
}
None => visitor.visit_unit(),
}
}
fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_i16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_i32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_i64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
match self.value.value_type {
Some(value::ValueType::IntegerValue(v)) => visitor.visit_u64(v as u64),
_ => Err(FirestoreError::DeserializeError(
FirestoreSerializationError::from_message(
"Unexpected field type for u64 deserialization",
),
)),
}
}
fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
match self.value.value_type {
Some(value::ValueType::NullValue(_)) => visitor.visit_none(),
None => visitor.visit_none(),
_ => visitor.visit_some(self),
}
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_name: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_unit(visitor)
}
fn deserialize_newtype_struct<V>(
self,
_name: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_tuple_struct<V>(
self,
_name: &'static str,
_len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_seq(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_struct<V>(
self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_enum<V>(
self,
_name: &'static str,
_variants: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_enum(FirestoreVariantAccess::new(self))
}
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
}
/// Deserializes a Firestore `Document` into any `Deserialize` type.
///
/// Besides the document's own fields, several synthetic fields are injected
/// so callers can capture metadata via serde field names:
/// `_firestore_id` (last segment of the document name), `_firestore_full_id`
/// (the full document name), and, when present on the document,
/// `_firestore_created` / `_firestore_updated` timestamps.
/// Deserialization errors are annotated with the document path.
pub fn firestore_document_to_serializable<T>(
    document: &gcloud_sdk::google::firestore::v1::Document,
) -> Result<T, FirestoreError>
where
    for<'de> T: Deserialize<'de>,
{
    // +4 leaves room for the synthetic metadata fields added below.
    let mut fields: HashMap<String, gcloud_sdk::google::firestore::v1::Value> =
        HashMap::with_capacity(document.fields.len() + 4);
    for (k, v) in document.fields.iter() {
        fields.insert(k.to_owned(), v.to_owned());
    }
    let doc_name = document.name.clone();
    // The id is the last '/'-separated segment; fall back to the whole name.
    let doc_id = doc_name
        .split('/')
        .next_back()
        .map(|s| s.to_string())
        .unwrap_or_else(|| doc_name.clone());
    fields.insert(
        "_firestore_id".to_string(),
        gcloud_sdk::google::firestore::v1::Value {
            value_type: Some(value::ValueType::StringValue(doc_id)),
        },
    );
    fields.insert(
        "_firestore_full_id".to_string(),
        gcloud_sdk::google::firestore::v1::Value {
            value_type: Some(value::ValueType::StringValue(doc_name.clone())),
        },
    );
    if let Some(created_time) = &document.create_time {
        fields.insert(
            "_firestore_created".to_string(),
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::TimestampValue(*created_time)),
            },
        );
    }
    if let Some(updated_time) = &document.update_time {
        fields.insert(
            "_firestore_updated".to_string(),
            gcloud_sdk::google::firestore::v1::Value {
                value_type: Some(value::ValueType::TimestampValue(*updated_time)),
            },
        );
    }
    // Wrap the whole document as a MapValue and let the FirestoreValue
    // Deserializer drive T's deserialization.
    let firestore_value = FirestoreValue::from(gcloud_sdk::google::firestore::v1::Value {
        value_type: Some(value::ValueType::MapValue(
            gcloud_sdk::google::firestore::v1::MapValue { fields },
        )),
    });
    T::deserialize(firestore_value).map_err(|err| match err {
        FirestoreError::DeserializeError(e) => {
            FirestoreError::DeserializeError(e.with_document_path(doc_name))
        }
        _ => err,
    })
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/latlng_serializers.rs | src/firestore_serde/latlng_serializers.rs | use gcloud_sdk::google::firestore::v1::value;
use serde::{Deserialize, Serialize, Serializer};
use std::collections::HashMap;
use crate::errors::*;
use crate::firestore_serde::serializer::FirestoreValueSerializer;
use crate::FirestoreValue;
// Newtype-struct name used as a tag to route `FirestoreLatLng` fields through
// the geo-point serializer below.
pub(crate) const FIRESTORE_LATLNG_TYPE_TAG_TYPE: &str = "FirestoreLatLng";
/// A latitude/longitude pair mirroring Firestore's GeoPoint value.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, PartialOrd, Default)]
pub struct FirestoreGeoPoint {
    pub latitude: f64,
    pub longitude: f64,
}
/// Newtype wrapper marking a field to be stored as a native Firestore
/// geo-point value.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, PartialOrd, Default)]
pub struct FirestoreLatLng(pub FirestoreGeoPoint);
/// Serializes `value` into a Firestore `GeoPointValue`.
///
/// The value is expected to serialize itself as a struct with `latitude` and
/// `longitude` double fields (such as `FirestoreGeoPoint`). Options and
/// newtype wrappers are unwrapped transparently; any other input shape is
/// rejected with a serialization error.
pub fn serialize_latlng_for_firestore<T: ?Sized + Serialize>(
    value: &T,
) -> Result<FirestoreValue, FirestoreError> {
    struct LatLngSerializer;

    /// Collects the struct's fields so `end` can extract the
    /// latitude/longitude pair and build the geo-point value.
    struct SerializeLatLngStruct {
        fields: HashMap<String, gcloud_sdk::google::firestore::v1::Value>,
    }

    impl serde::ser::SerializeStruct for SerializeLatLngStruct {
        type Ok = FirestoreValue;
        type Error = FirestoreError;

        fn serialize_field<T: ?Sized + Serialize>(
            &mut self,
            key: &'static str,
            value: &T,
        ) -> Result<(), Self::Error> {
            let serializer = FirestoreValueSerializer {
                none_as_null: false,
            };
            let serialized_value = value.serialize(serializer)?.value;
            // Fields that serialize to "no value" (e.g. None) are skipped.
            if serialized_value.value_type.is_some() {
                self.fields.insert(key.to_string(), serialized_value);
            }
            Ok(())
        }

        fn end(self) -> Result<Self::Ok, Self::Error> {
            // Both coordinates must be present as doubles, otherwise the
            // object does not describe a geo point.
            let (lat, lng) = match (self.fields.get("latitude"), self.fields.get("longitude")) {
                (
                    Some(gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::DoubleValue(lat)),
                    }),
                    Some(gcloud_sdk::google::firestore::v1::Value {
                        value_type: Some(value::ValueType::DoubleValue(lng)),
                    }),
                ) => (*lat, *lng),
                _ => {
                    return Err(FirestoreError::SerializeError(
                        FirestoreSerializationError::from_message(
                            "LatLng serializer doesn't recognize the structure of the object",
                        ),
                    ));
                }
            };
            Ok(FirestoreValue::from(
                gcloud_sdk::google::firestore::v1::Value {
                    value_type: Some(value::ValueType::GeoPointValue(
                        gcloud_sdk::google::r#type::LatLng {
                            latitude: lat,
                            longitude: lng,
                        },
                    )),
                },
            ))
        }
    }

    // All rejected inputs fail with the same error; generate the rejecting
    // methods with macros instead of repeating the body ~20 times.
    macro_rules! reject_scalar {
        ($($method:ident($arg:ty)),+ $(,)?) => {
            $(
                fn $method(self, _v: $arg) -> Result<Self::Ok, Self::Error> {
                    Err(FirestoreError::SerializeError(
                        FirestoreSerializationError::from_message(
                            "LatLng serializer doesn't support this type",
                        ),
                    ))
                }
            )+
        };
    }
    macro_rules! reject_compound {
        ($($method:ident($($arg:ident: $t:ty),*) -> $assoc:ident),+ $(,)?) => {
            $(
                fn $method(self, $($arg: $t),*) -> Result<Self::$assoc, Self::Error> {
                    Err(FirestoreError::SerializeError(
                        FirestoreSerializationError::from_message(
                            "LatLng serializer doesn't support this type",
                        ),
                    ))
                }
            )+
        };
    }

    impl Serializer for LatLngSerializer {
        type Ok = FirestoreValue;
        type Error = FirestoreError;
        type SerializeSeq = crate::firestore_serde::serializer::SerializeVec;
        type SerializeTuple = crate::firestore_serde::serializer::SerializeVec;
        type SerializeTupleStruct = crate::firestore_serde::serializer::SerializeVec;
        type SerializeTupleVariant = crate::firestore_serde::serializer::SerializeTupleVariant;
        type SerializeMap = crate::firestore_serde::serializer::SerializeMap;
        type SerializeStruct = SerializeLatLngStruct;
        type SerializeStructVariant = crate::firestore_serde::serializer::SerializeStructVariant;

        reject_scalar!(
            serialize_bool(bool),
            serialize_i8(i8),
            serialize_i16(i16),
            serialize_i32(i32),
            serialize_i64(i64),
            serialize_u8(u8),
            serialize_u16(u16),
            serialize_u32(u32),
            serialize_u64(u64),
            serialize_f32(f32),
            serialize_f64(f64),
            serialize_char(char),
            serialize_str(&str),
            serialize_bytes(&[u8]),
        );

        fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
            Ok(FirestoreValue::from(
                gcloud_sdk::google::firestore::v1::Value { value_type: None },
            ))
        }

        fn serialize_some<T: ?Sized + Serialize>(self, value: &T) -> Result<Self::Ok, Self::Error> {
            value.serialize(self)
        }

        fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
            Ok(FirestoreValue::from(
                gcloud_sdk::google::firestore::v1::Value { value_type: None },
            ))
        }

        fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
            self.serialize_unit()
        }

        // `serialize_str` rejects, so unit variants are rejected too — same
        // behavior as the original hand-written version.
        fn serialize_unit_variant(
            self,
            _name: &'static str,
            _variant_index: u32,
            variant: &'static str,
        ) -> Result<Self::Ok, Self::Error> {
            self.serialize_str(variant)
        }

        // Newtype structs (including the `FirestoreLatLng` tag wrapper) are
        // transparent: serialize the inner value.
        fn serialize_newtype_struct<T: ?Sized + Serialize>(
            self,
            _name: &'static str,
            value: &T,
        ) -> Result<Self::Ok, Self::Error> {
            value.serialize(self)
        }

        fn serialize_newtype_variant<T: ?Sized + Serialize>(
            self,
            _name: &'static str,
            _variant_index: u32,
            _variant: &'static str,
            _value: &T,
        ) -> Result<Self::Ok, Self::Error> {
            Err(FirestoreError::SerializeError(
                FirestoreSerializationError::from_message(
                    "LatLng serializer doesn't support this type",
                ),
            ))
        }

        reject_compound!(
            serialize_seq(_len: Option<usize>) -> SerializeSeq,
            serialize_tuple(_len: usize) -> SerializeTuple,
            serialize_tuple_struct(_name: &'static str, _len: usize) -> SerializeTupleStruct,
            serialize_tuple_variant(
                _name: &'static str,
                _variant_index: u32,
                _variant: &'static str,
                _len: usize
            ) -> SerializeTupleVariant,
            serialize_map(_len: Option<usize>) -> SerializeMap,
            serialize_struct_variant(
                _name: &'static str,
                _variant_index: u32,
                _variant: &'static str,
                _len: usize
            ) -> SerializeStructVariant,
        );

        // The only accepted top-level shape: a struct, whose fields are
        // collected by SerializeLatLngStruct.
        fn serialize_struct(
            self,
            _name: &'static str,
            len: usize,
        ) -> Result<Self::SerializeStruct, Self::Error> {
            Ok(SerializeLatLngStruct {
                fields: HashMap::with_capacity(len),
            })
        }
    }

    value.serialize(LatLngSerializer {})
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_serde/null_serializers.rs | src/firestore_serde/null_serializers.rs | pub(crate) const FIRESTORE_NULL_TYPE_TAG_TYPE: &str = "FirestoreNull";
pub mod serialize_as_null {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    /// Serializes an `Option<T>` so that `None` is written as an explicit
    /// Firestore null value instead of being skipped. This works by tagging
    /// the value with the special `FirestoreNull` newtype marker, which the
    /// crate's serializer recognizes.
    pub fn serialize<S, T>(date: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
        T: Serialize,
    {
        // NOTE(review): `&date` passes `&&Option<T>`; serde's blanket impl
        // for references forwards to the inner value, so the output is the
        // same as passing `date` directly.
        let tag = crate::firestore_serde::FIRESTORE_NULL_TYPE_TAG_TYPE;
        serializer.serialize_newtype_struct(tag, &date)
    }

    /// Deserializes the value as a plain `Option<T>`; no special handling is
    /// needed on the read path.
    pub fn deserialize<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
    where
        D: Deserializer<'de>,
        T: for<'tde> Deserialize<'tde>,
    {
        Option::<T>::deserialize(deserializer)
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/select_builder.rs | src/fluent_api/select_builder.rs | //! Builder for constructing Firestore select (query) operations.
//!
//! This module provides a fluent API for building complex queries to retrieve documents
//! from Firestore. It supports filtering, ordering, limiting, pagination (cursors),
//! projections, and fetching documents by ID. It also serves as a base for
//! aggregation queries and real-time listeners.
use crate::errors::FirestoreError;
use crate::select_aggregation_builder::FirestoreAggregationBuilder;
use crate::select_filter_builder::FirestoreQueryFilterBuilder;
use crate::{
FirestoreAggregatedQueryParams, FirestoreAggregatedQuerySupport, FirestoreAggregation,
FirestoreCollectionDocuments, FirestoreExplainOptions, FirestoreFindNearestDistanceMeasure,
FirestoreFindNearestOptions, FirestoreGetByIdSupport, FirestoreListenSupport,
FirestoreListener, FirestoreListenerParams, FirestoreListenerTarget,
FirestoreListenerTargetParams, FirestorePartition, FirestorePartitionQueryParams,
FirestoreQueryCollection, FirestoreQueryCursor, FirestoreQueryFilter, FirestoreQueryOrder,
FirestoreQueryParams, FirestoreQuerySupport, FirestoreResult, FirestoreResumeStateStorage,
FirestoreTargetType, FirestoreVector, FirestoreWithMetadata,
};
use futures::stream::BoxStream;
use gcloud_sdk::google::firestore::v1::Document;
use serde::Deserialize;
use std::collections::HashMap;
use std::marker::PhantomData;
/// The initial builder for a Firestore select/query operation.
///
/// Created by calling [`FirestoreExprBuilder::select()`](crate::FirestoreExprBuilder::select).
/// This builder allows specifying initial query parameters like projections (fields to return)
/// before defining the target collection or document IDs.
#[derive(Clone, Debug)]
pub struct FirestoreSelectInitialBuilder<'a, D>
where
    D: FirestoreQuerySupport
        + FirestoreGetByIdSupport
        + FirestoreListenSupport
        + FirestoreAggregatedQuerySupport
        + Clone
        + 'static,
{
    // Database facade the eventual query will run against.
    db: &'a D,
    // Optional projection mask: when `Some`, only these fields are returned.
    return_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreSelectInitialBuilder<'a, D>
where
    D: FirestoreQuerySupport
        + FirestoreGetByIdSupport
        + FirestoreListenSupport
        + FirestoreAggregatedQuerySupport
        + Clone
        + Send
        + Sync
        + 'static,
{
    /// Creates a new builder with no projection mask configured.
    #[inline]
    pub(crate) fn new(db: &'a D) -> Self {
        Self {
            db,
            return_only_fields: None,
        }
    }

    /// Restricts the query to return only the given fields (a projection).
    ///
    /// When this is never called, Firestore returns every field of each
    /// matching document.
    #[inline]
    pub fn fields<I>(self, return_only_fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        let mask: Vec<String> = return_only_fields
            .into_iter()
            .map(|field| field.as_ref().to_string())
            .collect();
        Self {
            db: self.db,
            return_only_fields: Some(mask),
        }
    }

    /// Targets a collection (or collection group) and moves on to the
    /// query-configuration stage where filters, ordering, limits, etc. can
    /// be applied.
    #[inline]
    pub fn from<C>(self, collection: C) -> FirestoreSelectDocBuilder<'a, D>
    where
        C: Into<FirestoreQueryCollection>,
    {
        let params = FirestoreQueryParams::new(collection.into())
            .opt_return_only_fields(self.return_only_fields);
        FirestoreSelectDocBuilder::new(self.db, params)
    }

    /// Switches to fetching documents by their IDs from the given collection
    /// instead of running a query.
    #[inline]
    pub fn by_id_in(self, collection: &str) -> FirestoreSelectByIdBuilder<'a, D> {
        FirestoreSelectByIdBuilder::new(self.db, collection.to_string(), self.return_only_fields)
    }
}
/// A builder for configuring and executing a Firestore query on a collection.
///
/// This builder allows setting filters, ordering, limits, cursors, and other
/// query parameters.
#[derive(Clone, Debug)]
pub struct FirestoreSelectDocBuilder<'a, D>
where
    D: FirestoreQuerySupport
        + FirestoreListenSupport
        + FirestoreAggregatedQuerySupport
        + Clone
        + Send
        + Sync,
{
    // Database facade the query will be executed against.
    db: &'a D,
    // Accumulated query parameters (collection, filters, ordering, cursors, ...).
    params: FirestoreQueryParams,
}
impl<'a, D> FirestoreSelectDocBuilder<'a, D>
where
    D: FirestoreQuerySupport
        + FirestoreListenSupport
        + FirestoreAggregatedQuerySupport
        + Clone
        + Send
        + Sync
        + 'static,
{
    /// Creates a builder around pre-assembled query parameters.
    #[inline]
    pub(crate) fn new(db: &'a D, params: FirestoreQueryParams) -> Self {
        Self { db, params }
    }

    /// Restricts the query to a sub-collection of the given parent document.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            db: self.db,
            params: self.params.with_parent(parent.as_ref().to_string()),
        }
    }

    /// Caps the number of returned documents at `value`.
    #[inline]
    pub fn limit(self, value: u32) -> Self {
        Self {
            db: self.db,
            params: self.params.with_limit(value),
        }
    }

    /// Skips the first `value` matching documents before returning results.
    #[inline]
    pub fn offset(self, value: u32) -> Self {
        Self {
            db: self.db,
            params: self.params.with_offset(value),
        }
    }

    /// Sorts results by the given fields and directions; several fields may
    /// be listed for a compound ordering.
    #[inline]
    pub fn order_by<I>(self, fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<FirestoreQueryOrder>,
    {
        let orders: Vec<FirestoreQueryOrder> = fields.into_iter().map(Into::into).collect();
        Self {
            db: self.db,
            params: self.params.with_order_by(orders),
        }
    }

    /// Begins the result set at the given cursor position.
    #[inline]
    pub fn start_at(self, cursor: FirestoreQueryCursor) -> Self {
        Self {
            db: self.db,
            params: self.params.with_start_at(cursor),
        }
    }

    /// Ends the result set at the given cursor position.
    #[inline]
    pub fn end_at(self, cursor: FirestoreQueryCursor) -> Self {
        Self {
            db: self.db,
            params: self.params.with_end_at(cursor),
        }
    }

    /// Turns this into a collection-group query covering every collection
    /// with the configured ID under the parent path.
    #[inline]
    pub fn all_descendants(self) -> Self {
        Self {
            db: self.db,
            params: self.params.with_all_descendants(true),
        }
    }

    /// Applies a filter built by the provided closure; returning `None`
    /// from the closure leaves the query unfiltered.
    #[inline]
    pub fn filter<FN>(self, filter: FN) -> Self
    where
        FN: Fn(FirestoreQueryFilterBuilder) -> Option<FirestoreQueryFilter>,
    {
        let built = filter(FirestoreQueryFilterBuilder::new());
        Self {
            db: self.db,
            params: self.params.opt_filter(built),
        }
    }

    /// Asks Firestore to include execution-plan metrics (with default
    /// options) in the response metadata.
    #[inline]
    pub fn explain(self) -> FirestoreSelectDocBuilder<'a, D> {
        self.explain_with_options(FirestoreExplainOptions::new())
    }

    /// Configures a nearest-neighbor vector search from its basic
    /// parameters: vector field path, query vector, distance measure and
    /// neighbor limit.
    #[inline]
    pub fn find_nearest<F>(
        self,
        field_name: F,
        vector: FirestoreVector,
        measure: FirestoreFindNearestDistanceMeasure,
        neighbors_limit: u32,
    ) -> FirestoreSelectDocBuilder<'a, D>
    where
        F: AsRef<str>,
    {
        let options = FirestoreFindNearestOptions::new(
            field_name.as_ref().to_string(),
            vector,
            measure,
            neighbors_limit,
        );
        self.find_nearest_with_options(options)
    }

    /// Configures a nearest-neighbor vector search from fully-specified
    /// options.
    #[inline]
    pub fn find_nearest_with_options(
        self,
        options: FirestoreFindNearestOptions,
    ) -> FirestoreSelectDocBuilder<'a, D> {
        Self {
            db: self.db,
            params: self.params.with_find_nearest(options),
        }
    }

    /// Asks Firestore to include execution-plan metrics using the supplied
    /// analysis options.
    #[inline]
    pub fn explain_with_options(
        self,
        options: FirestoreExplainOptions,
    ) -> FirestoreSelectDocBuilder<'a, D> {
        Self {
            db: self.db,
            params: self.params.with_explain_options(options),
        }
    }

    /// Switches to typed mode: results will be deserialized into `T`.
    #[inline]
    pub fn obj<T>(self) -> FirestoreSelectObjBuilder<'a, D, T>
    where
        T: Send,
        for<'de> T: Deserialize<'de>,
    {
        FirestoreSelectObjBuilder::new(self.db, self.params)
    }

    /// Switches to a partitioned query (implies `all_descendants`), used to
    /// split a large result set into independently-processable chunks.
    #[inline]
    pub fn partition_query(self) -> FirestorePartitionQueryDocBuilder<'a, D> {
        FirestorePartitionQueryDocBuilder::new(self.db, self.params.with_all_descendants(true))
    }

    /// Starts building a real-time listener for this query's results.
    #[inline]
    pub fn listen(self) -> FirestoreDocChangesListenerInitBuilder<'a, D> {
        let target = FirestoreTargetType::Query(self.params);
        FirestoreDocChangesListenerInitBuilder::new(self.db, target)
    }

    /// Attaches server-side aggregations (count/sum/avg) produced by the
    /// provided closure.
    #[inline]
    pub fn aggregate<FN>(self, aggregation: FN) -> FirestoreAggregatedQueryDocBuilder<'a, D>
    where
        FN: Fn(FirestoreAggregationBuilder) -> Vec<FirestoreAggregation>,
    {
        let aggregations = aggregation(FirestoreAggregationBuilder::new());
        FirestoreAggregatedQueryDocBuilder::new(
            self.db,
            FirestoreAggregatedQueryParams::new(self.params, aggregations),
        )
    }

    /// Runs the query and collects every matching document into a `Vec`.
    pub async fn query(self) -> FirestoreResult<Vec<Document>> {
        self.db.query_doc(self.params).await
    }

    /// Runs the query as a stream of documents; a stream error terminates
    /// the stream.
    pub async fn stream_query<'b>(self) -> FirestoreResult<BoxStream<'b, Document>> {
        self.db.stream_query_doc(self.params).await
    }

    /// Runs the query as a stream where errors are yielded as `Err` items
    /// instead of terminating the stream.
    pub async fn stream_query_with_errors<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        self.db.stream_query_doc_with_errors(self.params).await
    }

    /// Runs the query as a stream of documents paired with response
    /// metadata (e.g. explain metrics); errors are yielded as `Err` items.
    pub async fn stream_query_with_metadata<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<Document>>>> {
        self.db.stream_query_doc_with_metadata(self.params).await
    }
}
/// A builder for executing a query and deserializing results into a Rust type `T`.
#[derive(Clone, Debug)]
pub struct FirestoreSelectObjBuilder<'a, D, T>
where
    D: FirestoreQuerySupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    // Database facade the query will be executed against.
    db: &'a D,
    // Accumulated query parameters carried over from the untyped builder.
    params: FirestoreQueryParams,
    // Zero-sized marker tying this builder to the target type `T`.
    _pd: PhantomData<T>,
}
impl<'a, D, T> FirestoreSelectObjBuilder<'a, D, T>
where
    D: FirestoreQuerySupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    /// Creates a typed query builder from existing query parameters.
    pub(crate) fn new(
        db: &'a D,
        params: FirestoreQueryParams,
    ) -> FirestoreSelectObjBuilder<'a, D, T> {
        Self {
            db,
            params,
            _pd: PhantomData,
        }
    }

    /// Runs the query and deserializes every matching document into `T`.
    pub async fn query(self) -> FirestoreResult<Vec<T>> {
        let Self { db, params, .. } = self;
        db.query_obj(params).await
    }

    /// Streams deserialized objects; any stream or deserialization error
    /// terminates the stream.
    pub async fn stream_query<'b>(self) -> FirestoreResult<BoxStream<'b, T>>
    where
        T: 'b,
    {
        let Self { db, params, .. } = self;
        db.stream_query_obj(params).await
    }

    /// Streams `FirestoreResult<T>` items so failures are surfaced
    /// per-item rather than ending the stream.
    pub async fn stream_query_with_errors<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        T: 'b,
    {
        let Self { db, params, .. } = self;
        db.stream_query_obj_with_errors(params).await
    }

    /// Streams deserialized objects together with response metadata;
    /// errors are yielded as `Err` items.
    pub async fn stream_query_with_metadata<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<T>>>>
    where
        T: 'b,
    {
        let Self { db, params, .. } = self;
        db.stream_query_obj_with_metadata(params).await
    }

    /// Switches to a partitioned typed query (implies `all_descendants`).
    pub fn partition_query(self) -> FirestorePartitionQueryObjBuilder<'a, D, T>
    where
        T: 'a, // `T` must outlive the builder it is carried in
    {
        FirestorePartitionQueryObjBuilder::new(self.db, self.params.with_all_descendants(true))
    }
}
/// A builder for selecting documents by their IDs from a collection.
#[derive(Clone, Debug)]
pub struct FirestoreSelectByIdBuilder<'a, D>
where
    D: FirestoreGetByIdSupport,
{
    // Database facade used to fetch the documents.
    db: &'a D,
    // Collection ID the documents are fetched from.
    collection: String,
    // Optional parent document path for sub-collection lookups.
    parent: Option<String>,
    // Optional projection mask: when `Some`, only these fields are returned.
    return_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreSelectByIdBuilder<'a, D>
where
    D: FirestoreGetByIdSupport + FirestoreListenSupport + Send + Sync + Clone + 'static,
{
    /// Creates a by-ID fetch builder for the given collection.
    pub(crate) fn new(
        db: &'a D,
        collection: String,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreSelectByIdBuilder<'a, D> {
        Self {
            db,
            collection,
            parent: None,
            return_only_fields,
        }
    }

    /// Targets a sub-collection under the given parent document path.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            db: self.db,
            collection: self.collection,
            parent: Some(parent.as_ref().to_string()),
            return_only_fields: self.return_only_fields,
        }
    }

    /// Switches to typed mode: fetched documents are deserialized into `T`.
    #[inline]
    pub fn obj<T>(self) -> FirestoreSelectObjByIdBuilder<'a, D, T>
    where
        T: Send,
        for<'de> T: Deserialize<'de>,
    {
        FirestoreSelectObjByIdBuilder::new(
            self.db,
            self.collection,
            self.parent,
            self.return_only_fields,
        )
    }

    /// Fetches a single document by ID.
    ///
    /// A `DataNotFoundError` is mapped to `Ok(None)`; any other error is
    /// propagated unchanged.
    pub async fn one<S>(self, document_id: S) -> FirestoreResult<Option<Document>>
    where
        S: AsRef<str> + Send,
    {
        let fetched = if let Some(parent) = self.parent {
            self.db
                .get_doc_at::<S>(
                    parent.as_str(),
                    self.collection.as_str(),
                    document_id,
                    self.return_only_fields,
                )
                .await
        } else {
            self.db
                .get_doc::<S>(
                    self.collection.as_str(),
                    document_id,
                    self.return_only_fields,
                )
                .await
        };
        match fetched {
            Ok(doc) => Ok(Some(doc)),
            Err(FirestoreError::DataNotFoundError(_)) => Ok(None),
            Err(err) => Err(err),
        }
    }

    /// Fetches many documents by ID as a stream of `(id, Option<Document>)`;
    /// a fetch error terminates the stream.
    pub async fn batch<S, I>(
        self,
        document_ids: I,
    ) -> FirestoreResult<BoxStream<'a, (String, Option<Document>)>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        match self.parent {
            Some(parent) => {
                self.db
                    .batch_stream_get_docs_at::<S, I>(
                        parent.as_str(),
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
            None => {
                self.db
                    .batch_stream_get_docs::<S, I>(
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
        }
    }

    /// Fetches many documents by ID, yielding a `FirestoreResult` per item
    /// so individual failures do not end the stream.
    pub async fn batch_with_errors<S, I>(
        self,
        document_ids: I,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<Document>)>>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        match self.parent {
            Some(parent) => {
                self.db
                    .batch_stream_get_docs_at_with_errors::<S, I>(
                        parent.as_str(),
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
            None => {
                self.db
                    .batch_stream_get_docs_with_errors::<S, I>(
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
        }
    }

    /// Starts building a real-time listener over the given set of document
    /// IDs.
    pub fn batch_listen<S, I>(
        self,
        document_ids: I,
    ) -> FirestoreDocChangesListenerInitBuilder<'a, D>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        let ids: Vec<String> = document_ids
            .into_iter()
            .map(|id| id.as_ref().to_string())
            .collect();
        let documents =
            FirestoreCollectionDocuments::new(self.collection, ids).opt_parent(self.parent);
        FirestoreDocChangesListenerInitBuilder::new(
            self.db,
            FirestoreTargetType::Documents(documents),
        )
    }
}
/// A builder for fetching documents by ID and deserializing them into a Rust type `T`.
#[derive(Clone, Debug)]
pub struct FirestoreSelectObjByIdBuilder<'a, D, T>
where
    D: FirestoreGetByIdSupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    // Database facade used to fetch the documents.
    db: &'a D,
    // Collection ID the documents are fetched from.
    collection: String,
    // Optional parent document path for sub-collection lookups.
    parent: Option<String>,
    // Optional projection mask: when `Some`, only these fields are returned.
    return_only_fields: Option<Vec<String>>,
    // Zero-sized marker tying this builder to the target type `T`.
    _pd: PhantomData<T>,
}
impl<'a, D, T> FirestoreSelectObjByIdBuilder<'a, D, T>
where
    D: FirestoreGetByIdSupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    /// Creates a typed by-ID fetch builder.
    pub(crate) fn new(
        db: &'a D,
        collection: String,
        parent: Option<String>,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreSelectObjByIdBuilder<'a, D, T> {
        Self {
            db,
            collection,
            parent,
            return_only_fields,
            _pd: PhantomData,
        }
    }

    /// Fetches a single document by ID and deserializes it into `T`.
    ///
    /// A `DataNotFoundError` is mapped to `Ok(None)`; any other error is
    /// propagated unchanged.
    pub async fn one<S>(self, document_id: S) -> FirestoreResult<Option<T>>
    where
        S: AsRef<str> + Send,
    {
        let fetched = if let Some(parent) = self.parent {
            self.db
                .get_obj_at_return_fields::<T, S>(
                    parent.as_str(),
                    self.collection.as_str(),
                    document_id,
                    self.return_only_fields,
                )
                .await
        } else {
            self.db
                .get_obj_return_fields::<T, S>(
                    self.collection.as_str(),
                    document_id,
                    self.return_only_fields,
                )
                .await
        };
        match fetched {
            Ok(obj) => Ok(Some(obj)),
            Err(FirestoreError::DataNotFoundError(_)) => Ok(None),
            Err(err) => Err(err),
        }
    }

    /// Streams `(id, Option<T>)` pairs for the requested IDs; any fetch or
    /// deserialization error terminates the stream.
    pub async fn batch<S, I>(
        self,
        document_ids: I,
    ) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
        T: Send + 'a,
    {
        match self.parent {
            Some(parent) => {
                self.db
                    .batch_stream_get_objects_at::<T, S, I>(
                        parent.as_str(),
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
            None => {
                self.db
                    .batch_stream_get_objects::<T, S, I>(
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
        }
    }

    /// Streams `FirestoreResult<(id, Option<T>)>` items so per-document
    /// failures are reported without ending the stream.
    pub async fn batch_with_errors<S, I>(
        self,
        document_ids: I,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
        T: Send + 'a,
    {
        match self.parent {
            Some(parent) => {
                self.db
                    .batch_stream_get_objects_at_with_errors::<T, S, I>(
                        parent.as_str(),
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
            None => {
                self.db
                    .batch_stream_get_objects_with_errors::<T, S, I>(
                        self.collection.as_str(),
                        document_ids,
                        self.return_only_fields,
                    )
                    .await
            }
        }
    }
}
/// A builder for configuring and executing a partitioned query for documents.
#[derive(Clone, Debug)]
pub struct FirestorePartitionQueryDocBuilder<'a, D>
where
    D: FirestoreQuerySupport,
{
    // Database facade the partitioned query will run against.
    db: &'a D,
    // Base query parameters to partition over.
    params: FirestoreQueryParams,
    // Hint for how many partitions the caller intends to process concurrently.
    parallelism: usize,
    // Desired number of partitions to split the query into.
    partition_count: u32,
    // Page size used when fetching partition cursors from the server.
    page_size: u32,
}
impl<'a, D> FirestorePartitionQueryDocBuilder<'a, D>
where
D: FirestoreQuerySupport,
{
    /// Creates a new `FirestorePartitionQueryDocBuilder` with default
    /// parallelism (2), partition count (10) and partition page size (1000).
    #[inline]
    pub(crate) fn new(db: &'a D, params: FirestoreQueryParams) -> Self {
        Self {
            db,
            params,
            parallelism: 2, // Default parallelism
            partition_count: 10, // Default number of partitions to request
            page_size: 1000, // Default page size for fetching partitions
        }
    }
/// Sets the desired parallelism for processing partitions.
///
/// This hints at how many partitions might be processed concurrently by the caller.
///
/// # Arguments
/// * `max_threads`: The desired level of parallelism.
///
/// # Returns
/// The builder instance with the parallelism level set.
#[inline]
pub fn parallelism(self, max_threads: usize) -> Self {
Self {
parallelism: max_threads,
..self
}
}
/// Sets the desired number of partitions to divide the query into.
///
/// # Arguments
/// * `count`: The number of partitions.
///
/// # Returns
/// The builder instance with the partition count set.
#[inline]
pub fn partition_count(self, count: u32) -> Self {
Self {
partition_count: count,
..self
}
}
/// Sets the page size for retrieving partition cursors.
///
/// This controls how many partition definitions are fetched in each request to the server.
///
/// # Arguments
/// * `len`: The page size for partition cursors.
///
/// # Returns
/// The builder instance with the partition page size set.
#[inline]
pub fn page_size(self, len: u32) -> Self {
Self {
page_size: len,
..self
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | true |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/select_aggregation_builder.rs | src/fluent_api/select_aggregation_builder.rs | //! Builder for constructing aggregation queries in Firestore.
//!
//! This module provides a fluent API to define aggregations like `COUNT`, `SUM`, and `AVG`
//! to be performed over a set of documents matching a query.
//!
//! The main entry point is [`FirestoreAggregationBuilder`], which is typically
//! accessed via a method on a select/query builder (e.g., [`FirestoreSelectDocQueryBuilder::aggregate()`](crate::FirestoreSelectDocQueryBuilder::aggregate)).
use crate::{
FirestoreAggregation, FirestoreAggregationOperator, FirestoreAggregationOperatorAvg,
FirestoreAggregationOperatorCount, FirestoreAggregationOperatorSum,
};
/// A builder for constructing a list of aggregations to apply to a query.
///
/// This builder is used within a select/query operation to specify one or more
/// aggregations to be computed by Firestore.
///
/// The builder itself is stateless; it only provides entry points
/// ([`field`](Self::field), [`fields`](Self::fields)) for defining
/// aggregation expressions.
pub struct FirestoreAggregationBuilder {}
impl FirestoreAggregationBuilder {
    /// Creates the builder; obtained internally from a select/query builder
    /// rather than constructed directly.
    pub(crate) fn new() -> Self {
        Self {}
    }

    /// Collects a set of aggregation expressions into the final list,
    /// silently dropping any expression that evaluates to `None`.
    #[inline]
    pub fn fields<I>(&self, aggregation_field_expr: I) -> Vec<FirestoreAggregation>
    where
        I: IntoIterator,
        I::Item: FirestoreAggregationExpr,
    {
        let mut aggregations = Vec::new();
        for expr in aggregation_field_expr {
            if let Some(aggregation) = expr.build_aggregation() {
                aggregations.push(aggregation);
            }
        }
        aggregations
    }

    /// Starts an aggregation definition whose result will be reported under
    /// the given alias in the query response.
    #[inline]
    pub fn field<S>(&self, field_name: S) -> FirestoreAggregationFieldExpr
    where
        S: AsRef<str>,
    {
        FirestoreAggregationFieldExpr::new(field_name.as_ref().to_string())
    }
}
/// A trait for types that can be converted into a [`FirestoreAggregation`].
///
/// This is used by [`FirestoreAggregationBuilder::fields()`] to allow various ways
/// of defining aggregations, including optional ones (`Option<FirestoreAggregation>`
/// implementations can yield `None` to be skipped).
pub trait FirestoreAggregationExpr {
    /// Builds the [`FirestoreAggregation`].
    /// Returns `None` if the expression represents an empty or no-op aggregation,
    /// in which case it is filtered out of the final aggregation list.
    fn build_aggregation(self) -> Option<FirestoreAggregation>;
}
/// Represents a specific alias targeted for an aggregation operation.
///
/// This struct provides methods to define the actual aggregation to be performed
/// (e.g., count, sum, avg) and associate it with the alias.
pub struct FirestoreAggregationFieldExpr {
    field_name: String, // The alias under which the aggregation result is returned
}
impl FirestoreAggregationFieldExpr {
    /// Creates an expression bound to the given result alias.
    pub(crate) fn new(field_name: String) -> Self {
        Self { field_name }
    }

    /// COUNT over all documents matching the query, reported under this
    /// alias.
    #[inline]
    pub fn count(self) -> Option<FirestoreAggregation> {
        let op = FirestoreAggregationOperator::Count(FirestoreAggregationOperatorCount::new());
        Some(FirestoreAggregation::new(self.field_name).with_operator(op))
    }

    /// COUNT capped at `up_to` matching documents — cheaper than a full
    /// count when only a bounded count is needed.
    #[inline]
    pub fn count_up_to(self, up_to: usize) -> Option<FirestoreAggregation> {
        let op = FirestoreAggregationOperator::Count(
            FirestoreAggregationOperatorCount::new().with_up_to(up_to),
        );
        Some(FirestoreAggregation::new(self.field_name).with_operator(op))
    }

    /// SUM of the numeric field at `sum_on_field_name` over all matching
    /// documents, reported under this alias.
    #[inline]
    pub fn sum<S>(self, sum_on_field_name: S) -> Option<FirestoreAggregation>
    where
        S: AsRef<str>,
    {
        let op = FirestoreAggregationOperator::Sum(FirestoreAggregationOperatorSum::new(
            sum_on_field_name.as_ref().to_string(),
        ));
        Some(FirestoreAggregation::new(self.field_name).with_operator(op))
    }

    /// AVG of the numeric field at `avg_on_field_name` over all matching
    /// documents, reported under this alias.
    #[inline]
    pub fn avg<S>(self, avg_on_field_name: S) -> Option<FirestoreAggregation>
    where
        S: AsRef<str>,
    {
        let op = FirestoreAggregationOperator::Avg(FirestoreAggregationOperatorAvg::new(
            avg_on_field_name.as_ref().to_string(),
        ));
        Some(FirestoreAggregation::new(self.field_name).with_operator(op))
    }
}
// Identity conversion: an already-built aggregation is its own expression.
impl FirestoreAggregationExpr for FirestoreAggregation {
    #[inline]
    fn build_aggregation(self) -> Option<FirestoreAggregation> {
        Some(self)
    }
}
// Allows using Option<FirestoreAggregation> in the fields array,
// filtering out None values.
impl<F> FirestoreAggregationExpr for Option<F>
where
    F: FirestoreAggregationExpr,
{
    #[inline]
    fn build_aggregation(self) -> Option<FirestoreAggregation> {
        // `None` stays `None`; `Some(expr)` may itself still build to `None`.
        self.and_then(|expr| expr.build_aggregation())
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/listing_builder.rs | src/fluent_api/listing_builder.rs | //! Builder for constructing Firestore list operations.
//!
//! This module provides a fluent API for listing documents within a collection
//! or listing collection IDs under a parent document (or the database root).
//! It supports pagination, ordering (for document listing), and projections.
use crate::{
FirestoreListCollectionIdsParams, FirestoreListCollectionIdsResult, FirestoreListDocParams,
FirestoreListDocResult, FirestoreListingSupport, FirestoreQueryOrder, FirestoreResult,
};
use futures::stream::BoxStream;
use gcloud_sdk::google::firestore::v1::Document;
use serde::Deserialize;
use std::marker::PhantomData;
/// The initial builder for a Firestore list operation.
///
/// Created by calling [`FirestoreExprBuilder::list()`](crate::FirestoreExprBuilder::list).
/// From here, you can choose to list documents from a collection or list collection IDs.
#[derive(Clone, Debug)]
pub struct FirestoreListingInitialBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    // Database/client handle used to execute the listing.
    db: &'a D,
    // Optional projection mask: when set, only these fields are returned per document.
    return_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreListingInitialBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    /// Creates a new listing builder with no projection configured.
    #[inline]
    pub(crate) fn new(db: &'a D) -> Self {
        Self {
            db,
            return_only_fields: None,
        }
    }

    /// Restricts the returned document fields to the given set (a projection).
    ///
    /// Only meaningful when listing documents; it has no effect on
    /// collection-ID listing.
    ///
    /// # Arguments
    /// * `return_only_fields`: An iterator of field paths to return.
    ///
    /// # Returns
    /// The builder instance with the projection mask set.
    #[inline]
    pub fn fields<I>(self, return_only_fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        let mask: Vec<String> = return_only_fields
            .into_iter()
            .map(|field| field.as_ref().to_owned())
            .collect();
        Self {
            return_only_fields: Some(mask),
            ..self
        }
    }

    /// Targets a collection and switches to document listing.
    ///
    /// # Arguments
    /// * `collection`: The ID of the collection to list documents from.
    ///
    /// # Returns
    /// A [`FirestoreListingDocBuilder`] to further configure and execute the document listing.
    #[inline]
    pub fn from(self, collection: &str) -> FirestoreListingDocBuilder<'a, D> {
        let params = FirestoreListDocParams::new(collection.to_string())
            .opt_return_only_fields(self.return_only_fields);
        FirestoreListingDocBuilder::new(self.db, params)
    }

    /// Switches to listing collection IDs instead of documents.
    ///
    /// # Returns
    /// A [`FirestoreListCollectionIdsBuilder`] to configure and execute the collection ID listing.
    #[inline]
    pub fn collections(self) -> FirestoreListCollectionIdsBuilder<'a, D> {
        FirestoreListCollectionIdsBuilder::new(self.db)
    }
}
/// A builder for configuring and executing a document listing operation.
#[derive(Clone, Debug)]
pub struct FirestoreListingDocBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    // Database/client handle used to execute the listing.
    db: &'a D,
    // Accumulated listing parameters (collection, parent, page size, ordering, projection).
    params: FirestoreListDocParams,
}
impl<'a, D> FirestoreListingDocBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    /// Creates a new document-listing builder from prepared parameters.
    #[inline]
    pub(crate) fn new(db: &'a D, params: FirestoreListDocParams) -> Self {
        Self { db, params }
    }

    /// Switches this builder into "typed" mode: listed documents will be
    /// deserialized into `T` instead of being returned as raw documents.
    ///
    /// # Type Parameters
    /// * `T`: The target type; must implement `serde::Deserialize`.
    ///
    /// # Returns
    /// A [`FirestoreListingObjBuilder`] for streaming deserialized objects.
    #[inline]
    pub fn obj<T>(self) -> FirestoreListingObjBuilder<'a, D, T>
    where
        T: Send,
        for<'de> T: Deserialize<'de>,
    {
        FirestoreListingObjBuilder::new(self.db, self.params)
    }

    /// Sets the parent document path, for listing documents of a sub-collection.
    ///
    /// # Arguments
    /// * `parent`: The full path to the parent document.
    ///
    /// # Returns
    /// The builder instance with the parent path set.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        let parent_path = parent.as_ref().to_owned();
        Self {
            params: self.params.with_parent(parent_path),
            ..self
        }
    }

    /// Limits how many documents a single page may contain.
    ///
    /// # Arguments
    /// * `value`: The page size.
    ///
    /// # Returns
    /// The builder instance with the page size set.
    #[inline]
    pub fn page_size(self, value: usize) -> Self {
        Self {
            params: self.params.with_page_size(value),
            ..self
        }
    }

    /// Defines the sort order for the listed documents.
    ///
    /// # Arguments
    /// * `fields`: An iterator of [`FirestoreQueryOrder`] specifying the fields and directions to sort by.
    ///
    /// # Returns
    /// The builder instance with the ordering set.
    #[inline]
    pub fn order_by<I>(self, fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<FirestoreQueryOrder>,
    {
        let order: Vec<FirestoreQueryOrder> = fields.into_iter().map(Into::into).collect();
        Self {
            params: self.params.with_order_by(order),
            ..self
        }
    }

    /// Fetches a single page of documents.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a [`FirestoreListDocResult`] with the
    /// page's documents and an optional next-page token.
    pub async fn get_page(self) -> FirestoreResult<FirestoreListDocResult> {
        self.db.list_doc(self.params).await
    }

    /// Streams every matching document, transparently following pagination.
    /// Any error terminates the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of [`Document`]s.
    pub async fn stream_all<'b>(self) -> FirestoreResult<BoxStream<'b, Document>> {
        self.db.stream_list_doc(self.params).await
    }

    /// Streams every matching document, yielding errors as `Err` items
    /// instead of terminating the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of `FirestoreResult<Document>`.
    pub async fn stream_all_with_errors<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        self.db.stream_list_doc_with_errors(self.params).await
    }
}
/// A builder for streaming listed documents deserialized into a Rust type `T`.
#[derive(Clone, Debug)]
pub struct FirestoreListingObjBuilder<'a, D, T>
where
    D: FirestoreListingSupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    // Database/client handle used to execute the listing.
    db: &'a D,
    // Accumulated listing parameters, carried over from the document builder.
    params: FirestoreListDocParams,
    // Pins the target deserialization type `T` without storing a value of it.
    _pd: PhantomData<T>,
}
impl<'a, D, T> FirestoreListingObjBuilder<'a, D, T>
where
    D: FirestoreListingSupport,
    T: Send,
    for<'de> T: Deserialize<'de>,
{
    /// Creates a new typed listing builder from prepared parameters.
    pub(crate) fn new(db: &'a D, params: FirestoreListDocParams) -> Self {
        Self {
            db,
            params,
            _pd: PhantomData,
        }
    }

    /// Streams every matching document deserialized as `T`, transparently
    /// following pagination. Any stream or deserialization error terminates
    /// the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of deserialized objects `T`.
    pub async fn stream_all<'b>(self) -> FirestoreResult<BoxStream<'b, T>>
    where
        T: 'b,
    {
        self.db.stream_list_obj(self.params).await
    }

    /// Streams every matching document deserialized as `T`, transparently
    /// following pagination; stream/deserialization errors are surfaced as
    /// `Err` items rather than ending the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of `FirestoreResult<T>`.
    pub async fn stream_all_with_errors<'b>(
        self,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        T: 'b,
    {
        self.db.stream_list_obj_with_errors(self.params).await
    }
}
/// A builder for configuring and executing a collection ID listing operation.
#[derive(Clone, Debug)]
pub struct FirestoreListCollectionIdsBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    // Database/client handle used to execute the listing.
    db: &'a D,
    // Accumulated listing parameters (parent, page size).
    params: FirestoreListCollectionIdsParams,
}
impl<'a, D> FirestoreListCollectionIdsBuilder<'a, D>
where
    D: FirestoreListingSupport,
{
    /// Creates a builder with default (empty) listing parameters.
    #[inline]
    pub(crate) fn new(db: &'a D) -> Self {
        let params = FirestoreListCollectionIdsParams::new();
        Self { db, params }
    }

    /// Sets the parent document under which collection IDs are listed.
    ///
    /// Without a parent, collection IDs directly under the database root
    /// are listed.
    ///
    /// # Arguments
    /// * `parent`: The full path to the parent document.
    ///
    /// # Returns
    /// The builder instance with the parent path set.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        let parent_path = parent.as_ref().to_owned();
        Self {
            params: self.params.with_parent(parent_path),
            ..self
        }
    }

    /// Limits how many collection IDs a single page may contain.
    ///
    /// # Arguments
    /// * `value`: The page size.
    ///
    /// # Returns
    /// The builder instance with the page size set.
    #[inline]
    pub fn page_size(self, value: usize) -> Self {
        Self {
            params: self.params.with_page_size(value),
            ..self
        }
    }

    /// Fetches a single page of collection IDs.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a [`FirestoreListCollectionIdsResult`]
    /// with the page's IDs and an optional next-page token.
    pub async fn get_page(self) -> FirestoreResult<FirestoreListCollectionIdsResult> {
        self.db.list_collection_ids(self.params).await
    }

    /// Streams every matching collection ID, transparently following
    /// pagination. Any error terminates the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of `String` (collection IDs).
    pub async fn stream_all(self) -> FirestoreResult<BoxStream<'a, String>> {
        self.db.stream_list_collection_ids(self.params).await
    }

    /// Streams every matching collection ID, yielding errors as `Err` items
    /// instead of terminating the stream.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a `BoxStream` of `FirestoreResult<String>`.
    pub async fn stream_all_with_errors(
        self,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<String>>> {
        self.db
            .stream_list_collection_ids_with_errors(self.params)
            .await
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/delete_builder.rs | src/fluent_api/delete_builder.rs | //! Builder for constructing Firestore delete operations.
//!
//! This module provides a fluent API to specify the document to be deleted,
//! optionally including a parent path for sub-collections and preconditions
//! for the delete operation.
use crate::{
FirestoreBatch, FirestoreBatchWriter, FirestoreDeleteSupport, FirestoreResult,
FirestoreTransactionOps, FirestoreWritePrecondition,
};
/// The initial builder for a Firestore delete operation.
///
/// Created by calling [`FirestoreExprBuilder::delete()`](crate::FirestoreExprBuilder::delete).
#[derive(Clone, Debug)]
pub struct FirestoreDeleteInitialBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    // Database/client handle used to execute the delete.
    db: &'a D,
}
impl<'a, D> FirestoreDeleteInitialBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    /// Creates a new delete builder bound to the given database handle.
    #[inline]
    pub(crate) fn new(db: &'a D) -> Self {
        Self { db }
    }

    /// Targets the collection that contains the document to delete.
    ///
    /// # Arguments
    /// * `collection_id`: The ID of the collection.
    ///
    /// # Returns
    /// A [`FirestoreDeleteDocIdBuilder`] for choosing the document ID and options.
    #[inline]
    pub fn from(self, collection_id: &str) -> FirestoreDeleteDocIdBuilder<'a, D> {
        FirestoreDeleteDocIdBuilder::new(self.db, collection_id.to_owned())
    }
}
/// A builder for specifying the document ID and options for a delete operation.
#[derive(Clone, Debug)]
pub struct FirestoreDeleteDocIdBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    // Database/client handle used to execute the delete.
    db: &'a D,
    // The collection that contains the document to delete.
    collection_id: String,
    // Optional parent document path, for documents in sub-collections.
    parent: Option<String>,
    // Optional write precondition guarding the delete.
    precondition: Option<FirestoreWritePrecondition>,
}
impl<'a, D> FirestoreDeleteDocIdBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    /// Creates a new `FirestoreDeleteDocIdBuilder`.
    #[inline]
    pub(crate) fn new(db: &'a D, collection_id: String) -> Self {
        Self {
            db,
            collection_id,
            parent: None,
            precondition: None,
        }
    }

    /// Specifies the parent document path for deleting a document in a sub-collection.
    ///
    /// # Arguments
    /// * `parent`: The full path to the parent document.
    ///
    /// # Returns
    /// The builder instance with the parent path set.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            parent: Some(parent.as_ref().to_string()),
            ..self
        }
    }

    /// Specifies a precondition for the delete operation.
    ///
    /// The delete will only be executed if the precondition is met.
    ///
    /// # Arguments
    /// * `precondition`: The [`FirestoreWritePrecondition`] to apply.
    ///
    /// # Returns
    /// The builder instance with the precondition set.
    #[inline]
    pub fn precondition(self, precondition: FirestoreWritePrecondition) -> Self {
        Self {
            precondition: Some(precondition),
            ..self
        }
    }

    /// Specifies the ID of the document to delete.
    ///
    /// # Arguments
    /// * `document_id`: The ID of the document.
    ///
    /// # Returns
    /// A [`FirestoreDeleteExecuteBuilder`] to execute the delete operation or add it to a batch/transaction.
    #[inline]
    pub fn document_id<S>(self, document_id: S) -> FirestoreDeleteExecuteBuilder<'a, D>
    where
        S: AsRef<str> + Send,
    {
        FirestoreDeleteExecuteBuilder::new(
            self.db,
            // `self` is consumed here, so the collection ID can be moved
            // directly instead of being cloned via `to_string()`.
            self.collection_id,
            document_id.as_ref().to_string(),
            self.parent,
            self.precondition,
        )
    }
}
/// A builder for executing a Firestore delete operation or adding it to a batch/transaction.
#[derive(Clone, Debug)]
pub struct FirestoreDeleteExecuteBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    // Database/client handle used to execute the delete.
    db: &'a D,
    // The collection that contains the document to delete.
    collection_id: String,
    // The ID of the document to delete.
    document_id: String,
    // Optional parent document path, for documents in sub-collections.
    parent: Option<String>,
    // Optional write precondition guarding the delete.
    precondition: Option<FirestoreWritePrecondition>,
}
impl<'a, D> FirestoreDeleteExecuteBuilder<'a, D>
where
    D: FirestoreDeleteSupport,
{
    /// Creates a new `FirestoreDeleteExecuteBuilder`.
    #[inline]
    pub(crate) fn new(
        db: &'a D,
        collection_id: String,
        document_id: String,
        parent: Option<String>,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> Self {
        Self {
            db,
            collection_id,
            document_id,
            parent,
            precondition,
        }
    }

    /// Sets (or overrides) the parent document path. This is an alternative
    /// way to set the parent if it was not set in the previous builder step.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            parent: Some(parent.as_ref().to_string()),
            ..self
        }
    }

    /// Sets (or overrides) the write precondition guarding this delete.
    #[inline]
    pub fn precondition(self, precondition: FirestoreWritePrecondition) -> Self {
        Self {
            precondition: Some(precondition),
            ..self
        }
    }

    /// Executes the configured delete operation against the database.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure.
    pub async fn execute(self) -> FirestoreResult<()> {
        // Dispatch on whether a parent path was configured.
        match self.parent {
            Some(parent) => {
                self.db
                    .delete_by_id_at(
                        parent.as_str(),
                        self.collection_id.as_str(),
                        self.document_id,
                        self.precondition,
                    )
                    .await
            }
            None => {
                self.db
                    .delete_by_id(
                        self.collection_id.as_str(),
                        self.document_id,
                        self.precondition,
                    )
                    .await
            }
        }
    }

    /// Registers this delete inside the given transaction instead of
    /// executing it immediately.
    ///
    /// # Arguments
    /// * `transaction`: A mutable reference to the transaction to add this operation to.
    ///
    /// # Returns
    /// A `FirestoreResult` containing the mutable reference to the transaction, allowing for chaining.
    #[inline]
    pub fn add_to_transaction<'t, TO>(self, transaction: &'t mut TO) -> FirestoreResult<&'t mut TO>
    where
        TO: FirestoreTransactionOps,
    {
        match self.parent {
            Some(parent) => transaction.delete_by_id_at(
                parent.as_str(),
                self.collection_id.as_str(),
                self.document_id,
                self.precondition,
            ),
            None => transaction.delete_by_id(
                self.collection_id.as_str(),
                self.document_id,
                self.precondition,
            ),
        }
    }

    /// Registers this delete inside the given write batch instead of
    /// executing it immediately.
    ///
    /// # Type Parameters
    /// * `W`: The type of the batch writer, implementing [`FirestoreBatchWriter`].
    ///
    /// # Returns
    /// A `FirestoreResult` containing the mutable reference to the batch, allowing for chaining.
    #[inline]
    pub fn add_to_batch<'t, W>(
        self,
        batch: &'a mut FirestoreBatch<'t, W>,
    ) -> FirestoreResult<&'a mut FirestoreBatch<'t, W>>
    where
        W: FirestoreBatchWriter,
    {
        match self.parent {
            Some(parent) => batch.delete_by_id_at(
                parent.as_str(),
                self.collection_id.as_str(),
                self.document_id,
                self.precondition,
            ),
            None => batch.delete_by_id(
                self.collection_id.as_str(),
                self.document_id,
                self.precondition,
            ),
        }
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/update_builder.rs | src/fluent_api/update_builder.rs | //! Builder for constructing Firestore update operations.
//!
//! This module provides a fluent API to specify the document to be updated,
//! the data to update (either a full object, specific fields, or field transformations),
//! and optional preconditions.
use crate::document_transform_builder::FirestoreTransformBuilder;
use crate::{
FirestoreBatch, FirestoreBatchWriter, FirestoreFieldTransform, FirestoreResult,
FirestoreTransaction, FirestoreTransactionOps, FirestoreUpdateSupport,
FirestoreWritePrecondition,
};
use gcloud_sdk::google::firestore::v1::Document;
use serde::{Deserialize, Serialize};
/// The initial builder for a Firestore update operation.
///
/// Created by calling [`FirestoreExprBuilder::update()`](crate::FirestoreExprBuilder::update).
/// This builder allows specifying which fields to update. If no fields are specified,
/// the entire object provided later will be merged with the existing document.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateInitialBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    // Database/client handle used to execute the update.
    db: &'a D,
    // Optional update mask: when set, only these fields are modified.
    update_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreUpdateInitialBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    /// Creates a new update builder with no update mask configured.
    #[inline]
    pub(crate) fn new(db: &'a D) -> Self {
        Self {
            db,
            update_only_fields: None,
        }
    }

    /// Restricts the update to the exact set of fields given.
    ///
    /// When set, only the listed fields are modified; other fields in the
    /// provided object/document are ignored. When not set (the default),
    /// the update acts as a merge: fields of the provided object overwrite
    /// existing fields and new fields are added, while fields absent from
    /// the object remain untouched in the document.
    ///
    /// # Arguments
    /// * `update_only_fields`: An iterator of field paths (dot-separated for
    ///   nested fields) to include in the update mask.
    ///
    /// # Returns
    /// The builder instance with the field mask set.
    #[inline]
    pub fn fields<I>(self, update_only_fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        let mask: Vec<String> = update_only_fields
            .into_iter()
            .map(|field| field.as_ref().to_owned())
            .collect();
        Self {
            update_only_fields: Some(mask),
            ..self
        }
    }

    /// Targets the collection that contains the document to update.
    ///
    /// # Arguments
    /// * `collection_id`: The ID of the collection.
    ///
    /// # Returns
    /// A [`FirestoreUpdateDocObjBuilder`] to specify the document ID and data.
    #[inline]
    pub fn in_col(self, collection_id: &str) -> FirestoreUpdateDocObjBuilder<'a, D> {
        FirestoreUpdateDocObjBuilder::new(self.db, collection_id.to_owned(), self.update_only_fields)
    }
}
/// A builder for specifying the document ID and data for an update operation.
///
/// This stage allows setting the document data (as a raw `Document` or a serializable object),
/// preconditions, field transformations, and which fields to return after the update.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateDocObjBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    // Database/client handle used to execute the update.
    db: &'a D,
    // The collection that contains the document to update.
    collection_id: String,
    // Optional update mask: when set, only these fields are modified.
    update_only_fields: Option<Vec<String>>,
    // Optional parent document path, for documents in sub-collections.
    parent: Option<String>,
    // Optional projection mask for the fields returned after the update.
    return_only_fields: Option<Vec<String>>,
    // Optional write precondition guarding the update.
    precondition: Option<FirestoreWritePrecondition>,
    // Server-side field transformations to apply as part of the update.
    transforms: Vec<FirestoreFieldTransform>,
}
impl<'a, D> FirestoreUpdateDocObjBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    /// Creates a new `FirestoreUpdateDocObjBuilder`.
    #[inline]
    pub(crate) fn new(
        db: &'a D,
        collection_id: String,
        update_only_fields: Option<Vec<String>>,
    ) -> Self {
        Self {
            db,
            collection_id,
            update_only_fields,
            parent: None,
            return_only_fields: None,
            precondition: None,
            transforms: vec![],
        }
    }

    /// Specifies which fields of the updated document should be returned.
    ///
    /// If not set, the entire document is typically returned after the update.
    ///
    /// # Arguments
    /// * `return_only_fields`: An iterator of field paths to return.
    ///
    /// # Returns
    /// The builder instance with the projection mask for the return value set.
    #[inline]
    pub fn return_only_fields<I>(self, return_only_fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        Self {
            return_only_fields: Some(
                return_only_fields
                    .into_iter()
                    .map(|field| field.as_ref().to_string())
                    .collect(),
            ),
            ..self
        }
    }

    /// Specifies a precondition for the update operation.
    ///
    /// The update will only be executed if the precondition is met.
    ///
    /// # Arguments
    /// * `precondition`: The [`FirestoreWritePrecondition`] to apply.
    ///
    /// # Returns
    /// The builder instance with the precondition set.
    #[inline]
    pub fn precondition(self, precondition: FirestoreWritePrecondition) -> Self {
        Self {
            precondition: Some(precondition),
            ..self
        }
    }

    /// Specifies server-side field transformations to apply as part of the update.
    ///
    /// The `doc_transform` argument is a closure that receives a [`FirestoreTransformBuilder`]
    /// and should return a `Vec<FirestoreFieldTransform>`.
    /// Note: this *replaces* any previously configured transformations.
    ///
    /// # Arguments
    /// * `doc_transform`: A closure to build the list of field transformations.
    ///
    /// # Returns
    /// The builder instance with the field transformations set.
    #[inline]
    pub fn transforms<FN>(self, doc_transform: FN) -> Self
    where
        FN: Fn(FirestoreTransformBuilder) -> Vec<FirestoreFieldTransform>,
    {
        Self {
            transforms: doc_transform(FirestoreTransformBuilder::new()),
            ..self
        }
    }

    /// Specifies the document data to update using a raw [`Document`].
    ///
    /// The `document.name` field should contain the full path to the document.
    ///
    /// NOTE: this raw-document path forwards only the update mask, the return
    /// mask and the precondition; a previously configured `parent` or
    /// `transforms` is not carried over, since
    /// [`FirestoreUpdateDocExecuteBuilder`] has no fields for them.
    ///
    /// # Arguments
    /// * `document`: The Firestore `Document` containing the fields to update.
    ///
    /// # Returns
    /// A [`FirestoreUpdateDocExecuteBuilder`] to execute the operation.
    #[inline]
    pub fn document(self, document: Document) -> FirestoreUpdateDocExecuteBuilder<'a, D> {
        FirestoreUpdateDocExecuteBuilder::new(
            self.db,
            // `self` is consumed here, so the collection ID can be moved
            // directly instead of cloned via `to_string()` (consistent with
            // `document_id()` below).
            self.collection_id,
            self.update_only_fields,
            document,
            self.return_only_fields,
            self.precondition,
        )
    }

    /// Specifies the ID of the document to update.
    ///
    /// This transitions the builder to expect a Rust object for the update data.
    ///
    /// # Arguments
    /// * `document_id`: The ID of the document to update.
    ///
    /// # Returns
    /// A [`FirestoreUpdateObjInitExecuteBuilder`] to specify the object and execute.
    #[inline]
    pub fn document_id<S>(self, document_id: S) -> FirestoreUpdateObjInitExecuteBuilder<'a, D>
    where
        S: AsRef<str> + Send,
    {
        FirestoreUpdateObjInitExecuteBuilder::new(
            self.db,
            self.collection_id,
            self.update_only_fields,
            self.parent,
            document_id.as_ref().to_string(),
            self.return_only_fields,
            self.precondition,
            self.transforms,
        )
    }
}
/// A builder for executing an update operation with raw [`Document`] data.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateDocExecuteBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    // Database/client handle used to execute the update.
    db: &'a D,
    // The collection that contains the document to update.
    collection_id: String,
    // Optional update mask: when set, only these fields are modified.
    update_only_fields: Option<Vec<String>>,
    // The raw document whose `name` field carries the full document path.
    document: Document,
    // Optional projection mask for the fields returned after the update.
    return_only_fields: Option<Vec<String>>,
    // Optional write precondition guarding the update.
    precondition: Option<FirestoreWritePrecondition>,
}
impl<'a, D> FirestoreUpdateDocExecuteBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    /// Creates a new `FirestoreUpdateDocExecuteBuilder`.
    #[inline]
    pub(crate) fn new(
        db: &'a D,
        collection_id: String,
        update_only_fields: Option<Vec<String>>,
        document: Document,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> Self {
        Self {
            db,
            collection_id,
            update_only_fields,
            document,
            return_only_fields,
            precondition,
        }
    }

    /// Executes the configured update operation using the raw `Document`.
    ///
    /// The full document path is expected in `self.document.name`; the stored
    /// collection ID is passed along for consistency with the other builders.
    ///
    /// # Returns
    /// A `FirestoreResult` containing the updated [`Document`].
    pub async fn execute(self) -> FirestoreResult<Document> {
        let Self {
            db,
            collection_id,
            update_only_fields,
            document,
            return_only_fields,
            precondition,
        } = self;
        db.update_doc(
            collection_id.as_str(),
            document,
            update_only_fields,
            return_only_fields,
            precondition,
        )
        .await
    }
}
/// An intermediate builder stage for update operations using a Rust object.
/// This stage has the document ID and is ready to accept the object to update with.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateObjInitExecuteBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    // Database/client handle used to execute the update.
    db: &'a D,
    // The collection that contains the document to update.
    collection_id: String,
    // Optional update mask: when set, only these fields are modified.
    update_only_fields: Option<Vec<String>>,
    // Optional parent document path, for documents in sub-collections.
    parent: Option<String>,
    // The ID of the document to update.
    document_id: String,
    // Optional projection mask for the fields returned after the update.
    return_only_fields: Option<Vec<String>>,
    // Optional write precondition guarding the update.
    precondition: Option<FirestoreWritePrecondition>,
    // Server-side field transformations to apply as part of the update.
    transforms: Vec<FirestoreFieldTransform>,
}
impl<'a, D> FirestoreUpdateObjInitExecuteBuilder<'a, D>
where
    D: FirestoreUpdateSupport,
{
    /// Creates a new `FirestoreUpdateObjInitExecuteBuilder`.
    #[inline]
    pub(crate) fn new(
        db: &'a D,
        collection_id: String,
        update_only_fields: Option<Vec<String>>,
        parent: Option<String>,
        document_id: String,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
        transforms: Vec<FirestoreFieldTransform>,
    ) -> Self {
        Self {
            db,
            collection_id,
            update_only_fields,
            parent,
            document_id,
            return_only_fields,
            precondition,
            transforms,
        }
    }

    /// Specifies the parent document path for updating a document in a sub-collection.
    #[inline]
    pub fn parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            parent: Some(parent.as_ref().to_string()),
            ..self
        }
    }

    /// Specifies the Rust object containing the data to update the document with.
    ///
    /// The object `T` must implement `serde::Serialize`.
    ///
    /// # Arguments
    /// * `object`: A reference to the Rust object.
    ///
    /// # Type Parameters
    /// * `T`: The type of the object.
    ///
    /// # Returns
    /// A [`FirestoreUpdateObjExecuteBuilder`] to execute the operation or add it to a batch/transaction.
    #[inline]
    pub fn object<T>(self, object: &'a T) -> FirestoreUpdateObjExecuteBuilder<'a, D, T>
    where
        T: Serialize + Sync + Send,
        for<'de> T: Deserialize<'de>,
    {
        FirestoreUpdateObjExecuteBuilder::new(
            self.db,
            // `self` is consumed here, so the collection ID can be moved
            // directly instead of being cloned via `to_string()`.
            self.collection_id,
            self.update_only_fields,
            self.parent,
            self.document_id,
            object,
            self.return_only_fields,
            self.precondition,
            self.transforms,
        )
    }

    /// Specifies server-side field transformations to apply.
    /// This method is used when the update consists *only* of transformations,
    /// without merging an object's fields.
    /// Note: this *replaces* any previously configured transformations.
    ///
    /// # Arguments
    /// * `doc_transform`: A closure to build the list of field transformations.
    ///
    /// # Returns
    /// The builder instance with the field transformations set.
    #[inline]
    pub fn transforms<FN>(self, doc_transform: FN) -> Self
    where
        FN: Fn(FirestoreTransformBuilder) -> Vec<FirestoreFieldTransform>,
    {
        Self {
            transforms: doc_transform(FirestoreTransformBuilder::new()),
            ..self
        }
    }

    /// Finalizes the builder for an update operation that *only* applies field transformations.
    ///
    /// This should be called if no `.object()` is provided, and the update relies solely
    /// on the transformations defined via `.transforms()`.
    ///
    /// # Returns
    /// A [`FirestoreUpdateOnlyTransformBuilder`] to add the transform-only operation to a batch or transaction.
    #[inline]
    pub fn only_transform(self) -> FirestoreUpdateOnlyTransformBuilder<'a, D> {
        FirestoreUpdateOnlyTransformBuilder::new(
            self.db,
            // Same as in `object()`: move rather than clone the collection ID.
            self.collection_id,
            self.parent,
            self.document_id,
            self.precondition,
            self.transforms,
        )
    }
}
/// A builder for executing an update operation with a serializable Rust object.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateObjExecuteBuilder<'a, D, T>
where
    D: FirestoreUpdateSupport,
    T: Serialize + Sync + Send,
{
    // Database/client handle used to execute the update.
    db: &'a D,
    // The collection that contains the document to update.
    collection_id: String,
    // Optional update mask: when set, only these fields are modified.
    update_only_fields: Option<Vec<String>>,
    // Optional parent document path, for documents in sub-collections.
    parent: Option<String>,
    // The ID of the document to update.
    document_id: String,
    // The object whose serialized fields are merged into the document.
    object: &'a T,
    // Optional projection mask for the fields returned after the update.
    return_only_fields: Option<Vec<String>>,
    // Optional write precondition guarding the update.
    precondition: Option<FirestoreWritePrecondition>,
    // Server-side field transformations configured for this update.
    transforms: Vec<FirestoreFieldTransform>,
}
impl<'a, D, T> FirestoreUpdateObjExecuteBuilder<'a, D, T>
where
D: FirestoreUpdateSupport,
T: Serialize + Sync + Send,
{
    /// Creates a new `FirestoreUpdateObjExecuteBuilder`.
    ///
    /// Crate-internal: instances are produced by
    /// `FirestoreUpdateObjInitExecuteBuilder::object()`.
    #[inline]
    pub(crate) fn new(
        db: &'a D,
        collection_id: String,
        update_only_fields: Option<Vec<String>>,
        parent: Option<String>,
        document_id: String,
        object: &'a T,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
        transforms: Vec<FirestoreFieldTransform>,
    ) -> Self {
        Self {
            db,
            collection_id,
            update_only_fields,
            parent,
            document_id,
            object,
            return_only_fields,
            precondition,
            transforms,
        }
    }
    /// Executes the configured update operation, serializing the object and
    /// deserializing the result into type `O`.
    ///
    /// NOTE(review): transformations configured via `transforms()` are not
    /// forwarded by this method — the underlying trait calls take no
    /// transforms argument (see the inline notes below).
    ///
    /// # Type Parameters
    /// * `O`: The type to deserialize the result into. Must implement `serde::Deserialize`.
    ///
    /// # Returns
    /// A `FirestoreResult` containing the deserialized object `O` representing the updated document.
    pub async fn execute<O>(self) -> FirestoreResult<O>
    where
        for<'de> O: Deserialize<'de>,
    {
        if let Some(parent) = self.parent {
            self.db
                .update_obj_at(
                    parent.as_str(),
                    self.collection_id.as_str(),
                    self.document_id,
                    self.object,
                    self.update_only_fields,
                    self.return_only_fields,
                    self.precondition,
                    // Note: The current FirestoreUpdateSupport::update_obj_at doesn't take transforms.
                    // This might be an oversight or transforms are handled differently for object updates.
                    // If transforms are intended here, the trait method needs adjustment.
                    // For now, passing an empty vec or ignoring self.transforms if not supported by the trait.
                )
                .await
        } else {
            self.db
                .update_obj(
                    self.collection_id.as_str(),
                    self.document_id,
                    self.object,
                    self.update_only_fields,
                    self.return_only_fields,
                    self.precondition,
                    // Similar note as above for transforms.
                )
                .await
        }
    }
/// Adds server-side field transformations to the update operation.
///
/// This can be combined with updating fields from an object. The transformations
/// are applied *after* the object merge/update.
///
/// # Arguments
/// * `transforms_builder`: A closure to build the list of field transformations.
///
/// # Returns
/// The builder instance with added transformations.
#[inline]
pub fn transforms<FN>(self, transforms_builder: FN) -> Self
where
FN: Fn(FirestoreTransformBuilder) -> Vec<FirestoreFieldTransform>,
{
Self {
transforms: transforms_builder(FirestoreTransformBuilder::new()),
..self
}
}
/// Adds this update operation (object merge and/or transforms) to a [`FirestoreTransaction`].
///
/// # Arguments
/// * `transaction`: A mutable reference to the transaction.
///
/// # Returns
/// A `FirestoreResult` containing the mutable reference to the transaction.
#[inline]
pub fn add_to_transaction<'t, TO>(self, transaction: &'t mut TO) -> FirestoreResult<&'t mut TO>
where
TO: FirestoreTransactionOps,
{
if let Some(parent) = self.parent {
transaction.update_object_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.object,
self.update_only_fields,
self.precondition,
self.transforms,
)
} else {
transaction.update_object(
self.collection_id.as_str(),
self.document_id,
self.object,
self.update_only_fields,
self.precondition,
self.transforms,
)
}
}
/// Adds this update operation (object merge and/or transforms) to a [`FirestoreBatch`].
///
/// # Arguments
/// * `batch`: A mutable reference to the batch writer.
///
/// # Type Parameters
/// * `W`: The type of the batch writer.
///
/// # Returns
/// A `FirestoreResult` containing the mutable reference to the batch.
#[inline]
pub fn add_to_batch<'t, W>(
self,
batch: &'a mut FirestoreBatch<'t, W>,
) -> FirestoreResult<&'a mut FirestoreBatch<'t, W>>
where
W: FirestoreBatchWriter,
{
if let Some(parent) = self.parent {
batch.update_object_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.object,
self.update_only_fields,
self.precondition,
self.transforms,
)
} else {
batch.update_object(
self.collection_id.as_str(),
self.document_id,
self.object,
self.update_only_fields,
self.precondition,
self.transforms,
)
}
}
}
/// A builder for an update operation that consists *only* of field transformations.
///
/// This is used when no object data is being merged, and the update is solely
/// defined by server-side atomic operations like increment, array manipulation, etc.
#[derive(Clone, Debug)]
pub struct FirestoreUpdateOnlyTransformBuilder<'a, D>
where
D: FirestoreUpdateSupport,
{
_db: &'a D,
collection_id: String,
parent: Option<String>,
document_id: String,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
}
impl<'a, D> FirestoreUpdateOnlyTransformBuilder<'a, D>
where
D: FirestoreUpdateSupport,
{
/// Creates a new `FirestoreUpdateOnlyTransformBuilder`.
#[inline]
pub(crate) fn new(
db: &'a D,
collection_id: String,
parent: Option<String>,
document_id: String,
precondition: Option<FirestoreWritePrecondition>,
transforms: Vec<FirestoreFieldTransform>,
) -> Self {
Self {
_db: db,
collection_id,
parent,
document_id,
precondition,
transforms,
}
}
/// Adds this transform-only update operation to a [`FirestoreTransaction`].
///
/// # Arguments
/// * `transaction`: A mutable reference to the transaction.
///
/// # Returns
/// A `FirestoreResult` containing the mutable reference to the transaction.
#[inline]
pub fn add_to_transaction<'t>(
self,
transaction: &'a mut FirestoreTransaction<'t>,
) -> FirestoreResult<&'a mut FirestoreTransaction<'t>> {
if let Some(parent) = self.parent {
transaction.transform_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.precondition,
self.transforms,
)
} else {
transaction.transform(
self.collection_id.as_str(),
self.document_id,
self.precondition,
self.transforms,
)
}
}
/// Adds this transform-only update operation to a [`FirestoreBatch`].
///
/// # Arguments
/// * `batch`: A mutable reference to the batch writer.
///
/// # Type Parameters
/// * `W`: The type of the batch writer.
///
/// # Returns
/// A `FirestoreResult` containing the mutable reference to the batch.
#[inline]
pub fn add_to_batch<'t, W>(
self,
batch: &'a mut FirestoreBatch<'t, W>,
) -> FirestoreResult<&'a mut FirestoreBatch<'t, W>>
where
W: FirestoreBatchWriter,
{
if let Some(parent) = self.parent {
batch.transform_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.precondition,
self.transforms,
)
} else {
batch.transform(
self.collection_id.as_str(),
self.document_id,
self.precondition,
self.transforms,
)
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/mod.rs | src/fluent_api/mod.rs | //! Provides a fluent, chainable API for constructing and executing Firestore operations.
//!
//! This module is the entry point for the fluent API, which allows for a more declarative
//! and type-safe way to interact with Firestore compared to using the direct methods on
//! [`FirestoreDb`](crate::FirestoreDb) with [`FirestoreQueryParams`](crate::FirestoreQueryParams).
//!
//! The main way to access this API is via the [`FirestoreDb::fluent()`](crate::FirestoreDb::fluent) method,
//! which returns a [`FirestoreExprBuilder`]. From there, you can chain calls to build
//! `select`, `insert`, `update`, `delete`, or `list` operations.
//!
//! Each operation type has its own dedicated builder module:
//! - [`delete_builder`]: For constructing delete operations.
//! - [`document_transform_builder`]: For specifying field transformations in update operations.
//! - [`insert_builder`]: For constructing insert/create operations.
//! - [`listing_builder`]: For listing documents or collection IDs.
//! - [`select_aggregation_builder`]: For building aggregation queries (e.g., count, sum, avg).
//! - [`select_builder`]: For constructing query/select operations.
//! - [`select_filter_builder`]: For building complex filter conditions for queries.
//! - [`update_builder`]: For constructing update operations.
//! ```
// Linter allowance for functions that might have many arguments,
// often seen in builder patterns or comprehensive configuration methods.
#![allow(clippy::too_many_arguments)]
pub mod delete_builder;
pub mod document_transform_builder;
pub mod insert_builder;
pub mod listing_builder;
pub mod select_aggregation_builder;
pub mod select_builder;
pub mod select_filter_builder;
pub mod update_builder;
use crate::delete_builder::FirestoreDeleteInitialBuilder;
use crate::fluent_api::select_builder::FirestoreSelectInitialBuilder;
use crate::insert_builder::FirestoreInsertInitialBuilder;
use crate::listing_builder::FirestoreListingInitialBuilder;
use crate::update_builder::FirestoreUpdateInitialBuilder;
use crate::{
FirestoreAggregatedQuerySupport, FirestoreCreateSupport, FirestoreDb, FirestoreDeleteSupport,
FirestoreGetByIdSupport, FirestoreListenSupport, FirestoreListingSupport,
FirestoreQuerySupport, FirestoreUpdateSupport,
};
/// The entry point for building fluent Firestore expressions.
///
/// Obtain an instance of this builder by calling [`FirestoreDb::fluent()`](crate::FirestoreDb::fluent).
/// From this builder, you can chain methods to specify the type of operation
/// (select, insert, update, delete, list) and then further configure and execute it.
///
/// The type parameter `D` represents the underlying database client type, which
/// must implement various support traits (like [`FirestoreQuerySupport`], [`FirestoreCreateSupport`], etc.).
/// This is typically [`FirestoreDb`](crate::FirestoreDb).
#[derive(Clone, Debug)]
pub struct FirestoreExprBuilder<'a, D> {
db: &'a D,
}
impl<'a, D> FirestoreExprBuilder<'a, D>
where
D: FirestoreQuerySupport
+ FirestoreCreateSupport
+ FirestoreDeleteSupport
+ FirestoreUpdateSupport
+ FirestoreListingSupport
+ FirestoreGetByIdSupport
+ FirestoreListenSupport
+ FirestoreAggregatedQuerySupport
+ Clone
+ Send
+ Sync
+ 'static,
{
/// Creates a new `FirestoreExprBuilder` with a reference to the database client.
/// This is typically called by [`FirestoreDb::fluent()`](crate::FirestoreDb::fluent).
pub(crate) fn new(db: &'a D) -> Self {
Self { db }
}
/// Begins building a Firestore select/query operation.
///
/// Returns a [`FirestoreSelectInitialBuilder`] to further configure the query.
#[inline]
pub fn select(self) -> FirestoreSelectInitialBuilder<'a, D> {
FirestoreSelectInitialBuilder::new(self.db)
}
/// Begins building a Firestore insert/create operation.
///
/// Returns a [`FirestoreInsertInitialBuilder`] to further configure the insertion.
#[inline]
pub fn insert(self) -> FirestoreInsertInitialBuilder<'a, D> {
FirestoreInsertInitialBuilder::new(self.db)
}
/// Begins building a Firestore update operation.
///
/// Returns a [`FirestoreUpdateInitialBuilder`] to further configure the update.
#[inline]
pub fn update(self) -> FirestoreUpdateInitialBuilder<'a, D> {
FirestoreUpdateInitialBuilder::new(self.db)
}
/// Begins building a Firestore delete operation.
///
/// Returns a [`FirestoreDeleteInitialBuilder`] to further configure the deletion.
#[inline]
pub fn delete(self) -> FirestoreDeleteInitialBuilder<'a, D> {
FirestoreDeleteInitialBuilder::new(self.db)
}
/// Begins building a Firestore list operation (e.g., listing documents in a collection
/// or listing collection IDs).
///
/// Returns a [`FirestoreListingInitialBuilder`] to further configure the listing operation.
#[inline]
pub fn list(self) -> FirestoreListingInitialBuilder<'a, D> {
FirestoreListingInitialBuilder::new(self.db)
}
}
impl FirestoreDb {
/// Provides access to the fluent API for building Firestore operations.
///
/// This is the main entry point for using the chainable builder pattern.
#[inline]
pub fn fluent(&self) -> FirestoreExprBuilder<'_, FirestoreDb> {
FirestoreExprBuilder::new(self)
}
}
#[cfg(test)]
pub(crate) mod tests {
pub mod mockdb;
// Test structure used in fluent API examples and tests.
pub struct TestStructure {
pub some_id: String,
pub one_more_string: String,
pub some_num: u64,
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/select_filter_builder.rs | src/fluent_api/select_filter_builder.rs | //! Builder for constructing complex filter conditions for Firestore queries.
//!
//! This module provides a fluent API to define filters that can be applied to
//! select/query operations. It supports:
//! - Simple field comparisons (e.g., equality, greater than).
//! - Unary filters (e.g., IS NULL, IS NAN).
//! - Composite filters (AND, OR) to combine multiple conditions.
//!
//! The main entry point is [`FirestoreQueryFilterBuilder`], which is typically
//! accessed within a closure passed to the `.filter()` method of a query builder
//! (e.g., [`FirestoreSelectDocBuilder::filter()`](crate::FirestoreSelectDocBuilder::filter)).
use crate::{
FirestoreQueryFilter, FirestoreQueryFilterCompare, FirestoreQueryFilterComposite,
FirestoreQueryFilterCompositeOperator, FirestoreQueryFilterUnary, FirestoreValue,
};
/// A builder for constructing Firestore query filters.
///
/// This builder is used to create [`FirestoreQueryFilter`] instances, which can then
/// be applied to select/query operations. It provides methods for creating
/// composite filters (`for_all`, `for_any`) and for targeting specific fields
/// to apply comparison or unary operators.
#[derive(Clone, Debug)]
pub struct FirestoreQueryFilterBuilder;
impl FirestoreQueryFilterBuilder {
/// Creates a new `FirestoreQueryFilterBuilder`.
/// This is typically not called directly but provided within a `.filter()` closure.
pub(crate) fn new() -> Self {
Self {}
}
/// Internal helper to build a composite filter (AND or OR).
///
/// If only one valid filter expression is provided, it's returned directly without
/// being wrapped in a composite filter. If no valid expressions are provided,
/// `None` is returned.
#[inline]
fn build_filter_with_op<I>(
&self,
filter_expressions: I,
op: FirestoreQueryFilterCompositeOperator,
) -> Option<FirestoreQueryFilter>
where
I: IntoIterator,
I::Item: FirestoreQueryFilterExpr,
{
let mut filters: Vec<FirestoreQueryFilter> = filter_expressions
.into_iter()
.filter_map(|filter| filter.build_filter())
.collect();
if filters.is_empty() {
None
} else if filters.len() == 1 {
filters.pop()
} else {
Some(FirestoreQueryFilter::Composite(
FirestoreQueryFilterComposite::new(filters, op),
))
}
}
/// Creates a composite filter where all provided filter expressions must be true (logical AND).
///
/// # Arguments
/// * `filter_expressions`: An iterator of items that implement [`FirestoreQueryFilterExpr`].
///
/// # Returns
/// An `Option<FirestoreQueryFilter>` representing the AND-combined filter.
/// Returns `None` if `filter_expressions` is empty or contains only `None` expressions.
/// Returns the single filter directly if only one valid expression is provided.
#[inline]
pub fn for_all<I>(&self, filter_expressions: I) -> Option<FirestoreQueryFilter>
where
I: IntoIterator,
I::Item: FirestoreQueryFilterExpr,
{
self.build_filter_with_op(
filter_expressions,
FirestoreQueryFilterCompositeOperator::And,
)
}
/// Creates a composite filter where at least one of the provided filter expressions must be true (logical OR).
///
/// # Arguments
/// * `filter_expressions`: An iterator of items that implement [`FirestoreQueryFilterExpr`].
///
/// # Returns
/// An `Option<FirestoreQueryFilter>` representing the OR-combined filter.
/// Returns `None` if `filter_expressions` is empty or contains only `None` expressions.
/// Returns the single filter directly if only one valid expression is provided.
#[inline]
pub fn for_any<I>(&self, filter_expressions: I) -> Option<FirestoreQueryFilter>
where
I: IntoIterator,
I::Item: FirestoreQueryFilterExpr,
{
self.build_filter_with_op(
filter_expressions,
FirestoreQueryFilterCompositeOperator::Or,
)
}
/// Specifies a document field to apply a filter condition to.
///
/// # Arguments
/// * `field_name`: The dot-separated path to the field.
///
/// # Returns
/// A [`FirestoreQueryFilterFieldExpr`] to specify the comparison or unary operator.
#[inline]
pub fn field<S>(&self, field_name: S) -> FirestoreQueryFilterFieldExpr
where
S: AsRef<str>,
{
FirestoreQueryFilterFieldExpr::new(field_name.as_ref().to_string())
}
}
/// A trait for types that can be converted into a [`FirestoreQueryFilter`].
///
/// This is used by [`FirestoreQueryFilterBuilder`] methods like `for_all` and `for_any`
/// to allow various ways of defining filter conditions, including optional ones.
pub trait FirestoreQueryFilterExpr {
/// Builds the [`FirestoreQueryFilter`].
/// Returns `None` if the expression represents an empty or no-op filter.
fn build_filter(self) -> Option<FirestoreQueryFilter>;
}
/// Represents a specific field targeted for a filter condition.
///
/// This struct provides methods to define the comparison or unary operator
/// to be applied to the field.
pub struct FirestoreQueryFilterFieldExpr {
field_name: String,
}
impl FirestoreQueryFilterFieldExpr {
/// Creates a new `FirestoreQueryFilterFieldExpr` for the given field name.
pub(crate) fn new(field_name: String) -> Self {
Self { field_name }
}
/// Creates an "equal to" filter (e.g., `field == value`).
/// Alias for [`equal()`](#method.equal).
#[inline]
pub fn eq<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
self.equal(value)
}
/// Creates a "not equal to" filter (e.g., `field != value`).
/// Alias for [`not_equal()`](#method.not_equal).
#[inline]
pub fn neq<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
self.not_equal(value)
}
/// Creates an "equal to" filter (e.g., `field == value`).
#[inline]
pub fn equal<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::Equal(self.field_name, value.into()),
)))
}
/// Creates a "not equal to" filter (e.g., `field != value`).
#[inline]
pub fn not_equal<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::NotEqual(self.field_name, value.into()),
)))
}
/// Creates a "less than" filter (e.g., `field < value`).
#[inline]
pub fn less_than<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::LessThan(self.field_name, value.into()),
)))
}
/// Creates a "less than or equal to" filter (e.g., `field <= value`).
#[inline]
pub fn less_than_or_equal<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::LessThanOrEqual(self.field_name, value.into()),
)))
}
/// Creates a "greater than" filter (e.g., `field > value`).
#[inline]
pub fn greater_than<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::GreaterThan(self.field_name, value.into()),
)))
}
/// Creates a "greater than or equal to" filter (e.g., `field >= value`).
#[inline]
pub fn greater_than_or_equal<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::GreaterThanOrEqual(self.field_name, value.into()),
)))
}
/// Creates an "in" filter (e.g., `field IN [value1, value2, ...]`).
/// The provided `value` should be a [`FirestoreValue::ArrayValue`].
#[inline]
pub fn is_in<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::In(self.field_name, value.into()),
)))
}
/// Creates a "not in" filter (e.g., `field NOT IN [value1, value2, ...]`).
/// The provided `value` should be a [`FirestoreValue::ArrayValue`].
#[inline]
pub fn is_not_in<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::NotIn(self.field_name, value.into()),
)))
}
/// Creates an "array-contains" filter (e.g., `field array-contains value`).
/// Checks if an array field contains the given value.
#[inline]
pub fn array_contains<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::ArrayContains(self.field_name, value.into()),
)))
}
/// Creates an "array-contains-any" filter (e.g., `field array-contains-any [value1, value2, ...]`).
/// Checks if an array field contains any of the values in the provided array.
/// The provided `value` should be a [`FirestoreValue::ArrayValue`].
#[inline]
pub fn array_contains_any<V>(self, value: V) -> Option<FirestoreQueryFilter>
where
V: Into<FirestoreValue>,
{
Some(FirestoreQueryFilter::Compare(Some(
FirestoreQueryFilterCompare::ArrayContainsAny(self.field_name, value.into()),
)))
}
/// Creates an "is NaN" filter. Checks if a numeric field is NaN (Not a Number).
#[inline]
pub fn is_nan(self) -> Option<FirestoreQueryFilter> {
Some(FirestoreQueryFilter::Unary(
FirestoreQueryFilterUnary::IsNan(self.field_name),
))
}
/// Creates an "is not NaN" filter. Checks if a numeric field is not NaN.
#[inline]
pub fn is_not_nan(self) -> Option<FirestoreQueryFilter> {
Some(FirestoreQueryFilter::Unary(
FirestoreQueryFilterUnary::IsNotNan(self.field_name),
))
}
/// Creates an "is null" filter. Checks if a field is null.
#[inline]
pub fn is_null(self) -> Option<FirestoreQueryFilter> {
Some(FirestoreQueryFilter::Unary(
FirestoreQueryFilterUnary::IsNull(self.field_name),
))
}
/// Creates an "is not null" filter. Checks if a field is not null.
#[inline]
pub fn is_not_null(self) -> Option<FirestoreQueryFilter> {
Some(FirestoreQueryFilter::Unary(
FirestoreQueryFilterUnary::IsNotNull(self.field_name),
))
}
}
impl FirestoreQueryFilterExpr for FirestoreQueryFilter {
#[inline]
fn build_filter(self) -> Option<FirestoreQueryFilter> {
Some(self)
}
}
// Allows using Option<FirestoreQueryFilter> in the for_all/for_any arrays,
// filtering out None values.
impl<F> FirestoreQueryFilterExpr for Option<F>
where
F: FirestoreQueryFilterExpr,
{
#[inline]
fn build_filter(self) -> Option<FirestoreQueryFilter> {
self.and_then(|expr| expr.build_filter())
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/insert_builder.rs | src/fluent_api/insert_builder.rs | //! Builder for constructing Firestore insert (create) operations.
//!
//! This module provides a fluent API to specify the collection, document ID (optional),
//! and the data to be inserted into Firestore. It supports inserting both raw
//! [`Document`](gcloud_sdk::google::firestore::v1::Document) types and serializable Rust objects.
use crate::{FirestoreCreateSupport, FirestoreResult};
use gcloud_sdk::google::firestore::v1::Document;
use serde::{Deserialize, Serialize};
/// The initial builder for a Firestore insert operation.
///
/// Created by calling [`FirestoreExprBuilder::insert()`](crate::FirestoreExprBuilder::insert).
#[derive(Clone, Debug)]
pub struct FirestoreInsertInitialBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
db: &'a D,
}
impl<'a, D> FirestoreInsertInitialBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
/// Creates a new `FirestoreInsertInitialBuilder`.
#[inline]
pub(crate) fn new(db: &'a D) -> Self {
Self { db }
}
/// Specifies the collection ID into which the document will be inserted.
///
/// # Arguments
/// * `collection_id`: The ID of the target collection.
///
/// # Returns
/// A [`FirestoreInsertDocIdBuilder`] to specify the document ID or have it auto-generated.
#[inline]
pub fn into(self, collection_id: &str) -> FirestoreInsertDocIdBuilder<'a, D> {
FirestoreInsertDocIdBuilder::new(self.db, collection_id.to_string())
}
}
/// A builder for specifying the document ID for an insert operation.
///
/// This stage allows either providing a specific document ID or opting for
/// Firestore to auto-generate one.
#[derive(Clone, Debug)]
pub struct FirestoreInsertDocIdBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
db: &'a D,
collection_id: String,
}
impl<'a, D> FirestoreInsertDocIdBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
/// Creates a new `FirestoreInsertDocIdBuilder`.
#[inline]
pub(crate) fn new(db: &'a D, collection_id: String) -> Self {
Self { db, collection_id }
}
/// Specifies a user-defined ID for the new document.
///
/// If this ID already exists in the collection, the operation will fail.
///
/// # Arguments
/// * `document_id`: The ID to assign to the new document.
///
/// # Returns
/// A [`FirestoreInsertDocObjBuilder`] to specify the document data.
#[inline]
pub fn document_id<S>(self, document_id: S) -> FirestoreInsertDocObjBuilder<'a, D>
where
S: AsRef<str> + Send,
{
FirestoreInsertDocObjBuilder::new(
self.db,
self.collection_id,
Some(document_id.as_ref().to_string()),
)
}
/// Configures the operation to let Firestore auto-generate the document ID.
///
/// # Returns
/// A [`FirestoreInsertDocObjBuilder`] to specify the document data.
#[inline]
pub fn generate_document_id(self) -> FirestoreInsertDocObjBuilder<'a, D> {
FirestoreInsertDocObjBuilder::new(self.db, self.collection_id, None)
}
}
/// A builder for specifying the object or document data for an insert operation.
///
/// This stage also allows specifying a parent path (for sub-collections) and
/// which fields to return from the operation.
#[derive(Clone, Debug)]
pub struct FirestoreInsertDocObjBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
db: &'a D,
collection_id: String,
document_id: Option<String>,
parent: Option<String>,
return_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreInsertDocObjBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
/// Creates a new `FirestoreInsertDocObjBuilder`.
#[inline]
pub(crate) fn new(db: &'a D, collection_id: String, document_id: Option<String>) -> Self {
Self {
db,
collection_id,
document_id,
parent: None,
return_only_fields: None,
}
}
/// Specifies the parent document path for inserting a document into a sub-collection.
///
/// # Arguments
/// * `parent`: The full path to the parent document.
///
/// # Returns
/// The builder instance with the parent path set.
#[inline]
pub fn parent<S>(self, parent: S) -> Self
where
S: AsRef<str>,
{
Self {
parent: Some(parent.as_ref().to_string()),
..self
}
}
/// Specifies which fields of the newly created document should be returned.
///
/// If not set, the entire document is typically returned (behavior may depend on the server).
///
/// # Arguments
/// * `return_only_fields`: An iterator of field paths to return.
///
/// # Returns
/// The builder instance with the projection mask set.
#[inline]
pub fn return_only_fields<I>(self, return_only_fields: I) -> Self
where
I: IntoIterator,
I::Item: AsRef<str>,
{
Self {
return_only_fields: Some(
return_only_fields
.into_iter()
.map(|field| field.as_ref().to_string())
.collect(),
),
..self
}
}
/// Specifies the document data to insert as a raw [`Document`].
///
/// # Arguments
/// * `document`: The Firestore `Document` to insert.
///
/// # Returns
/// A [`FirestoreInsertDocExecuteBuilder`] to execute the operation.
#[inline]
pub fn document(self, document: Document) -> FirestoreInsertDocExecuteBuilder<'a, D> {
FirestoreInsertDocExecuteBuilder::new(
self.db,
self.collection_id.to_string(),
self.document_id,
self.parent,
document,
self.return_only_fields,
)
}
/// Specifies the document data to insert as a serializable Rust object.
///
/// The object `T` must implement `serde::Serialize`.
///
/// # Arguments
/// * `object`: A reference to the Rust object to serialize and insert.
///
/// # Type Parameters
/// * `T`: The type of the object to insert.
///
/// # Returns
/// A [`FirestoreInsertObjExecuteBuilder`] to execute the operation.
#[inline]
pub fn object<T>(self, object: &'a T) -> FirestoreInsertObjExecuteBuilder<'a, D, T>
where
T: Serialize + Sync + Send,
for<'de> T: Deserialize<'de>, // Bound for potential return type deserialization
{
FirestoreInsertObjExecuteBuilder::new(
self.db,
self.collection_id.to_string(),
self.parent,
self.document_id,
object,
self.return_only_fields,
)
}
}
/// A builder for executing an insert operation with raw [`Document`] data.
#[derive(Clone, Debug)]
pub struct FirestoreInsertDocExecuteBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
db: &'a D,
collection_id: String,
document_id: Option<String>,
parent: Option<String>,
document: Document,
return_only_fields: Option<Vec<String>>,
}
impl<'a, D> FirestoreInsertDocExecuteBuilder<'a, D>
where
D: FirestoreCreateSupport,
{
/// Creates a new `FirestoreInsertDocExecuteBuilder`.
#[inline]
pub(crate) fn new(
db: &'a D,
collection_id: String,
document_id: Option<String>,
parent: Option<String>,
document: Document,
return_only_fields: Option<Vec<String>>,
) -> Self {
Self {
db,
collection_id,
document_id,
parent,
document,
return_only_fields,
}
}
/// Executes the configured insert operation.
///
/// # Returns
/// A `FirestoreResult` containing the created [`Document`].
pub async fn execute(self) -> FirestoreResult<Document> {
if let Some(parent) = self.parent {
self.db
.create_doc_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.document,
self.return_only_fields,
)
.await
} else {
self.db
.create_doc(
self.collection_id.as_str(),
self.document_id,
self.document,
self.return_only_fields,
)
.await
}
}
}
/// A builder for executing an insert operation with a serializable Rust object.
#[derive(Clone, Debug)]
pub struct FirestoreInsertObjExecuteBuilder<'a, D, T>
where
D: FirestoreCreateSupport,
T: Serialize + Sync + Send,
{
db: &'a D,
collection_id: String,
parent: Option<String>,
document_id: Option<String>,
object: &'a T,
return_only_fields: Option<Vec<String>>,
}
impl<'a, D, T> FirestoreInsertObjExecuteBuilder<'a, D, T>
where
D: FirestoreCreateSupport,
T: Serialize + Sync + Send,
{
/// Creates a new `FirestoreInsertObjExecuteBuilder`.
#[inline]
pub(crate) fn new(
db: &'a D,
collection_id: String,
parent: Option<String>,
document_id: Option<String>,
object: &'a T,
return_only_fields: Option<Vec<String>>,
) -> Self {
Self {
db,
collection_id,
parent,
document_id,
object,
return_only_fields,
}
}
/// Executes the configured insert operation, serializing the object and
/// deserializing the result into type `O`.
///
/// # Type Parameters
/// * `O`: The type to deserialize the result into. Must implement `serde::Deserialize`.
///
/// # Returns
/// A `FirestoreResult` containing the deserialized object `O`.
pub async fn execute<O>(self) -> FirestoreResult<O>
where
for<'de> O: Deserialize<'de>,
{
if let Some(parent) = self.parent {
self.db
.create_obj_at(
parent.as_str(),
self.collection_id.as_str(),
self.document_id,
self.object,
self.return_only_fields,
)
.await
} else {
self.db
.create_obj(
self.collection_id.as_str(),
self.document_id,
self.object,
self.return_only_fields,
)
.await
}
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/document_transform_builder.rs | src/fluent_api/document_transform_builder.rs | //! Builder for specifying field transformations in Firestore update operations.
//!
//! This module provides a fluent API to define transformations that should be
//! applied to document fields atomically on the server-side. These transformations
//! are used with update operations.
//!
//! Examples of transformations include:
//! - Setting a field to the server's timestamp.
//! - Incrementing a numeric field.
//! - Adding or removing elements from an array field.
//!
//! The main entry point is [`FirestoreTransformBuilder`], which is typically
//! accessed via a method on an update builder (e.g., [`FirestoreUpdateSetBuilder::transforms()`](crate::FirestoreUpdateSetBuilder::transforms)).
use crate::{
FirestoreFieldTransform, FirestoreFieldTransformType, FirestoreTransformServerValue,
FirestoreValue,
};
/// A builder for constructing a list of field transformations.
///
/// This builder is used within an update operation to specify atomic, server-side
/// modifications to document fields.
pub struct FirestoreTransformBuilder {}
impl FirestoreTransformBuilder {
    /// Creates a new, stateless transform builder.
    ///
    /// Usually obtained through an update builder rather than constructed directly.
    pub(crate) fn new() -> Self {
        Self {}
    }

    /// Collects a set of transform expressions into a `Vec` of [`FirestoreFieldTransform`].
    ///
    /// Accepts anything implementing [`FirestoreTransformExpr`] — including
    /// `Option<...>` expressions, whose `None` values are silently dropped —
    /// and returns the resulting list ready for an update operation.
    ///
    /// # Arguments
    /// * `transform_field_expr`: An iterator of transform expressions.
    ///
    /// # Returns
    /// A `Vec<FirestoreFieldTransform>` ready to be used in an update operation.
    #[inline]
    pub fn fields<I>(&self, transform_field_expr: I) -> Vec<FirestoreFieldTransform>
    where
        I: IntoIterator,
        I::Item: FirestoreTransformExpr,
    {
        transform_field_expr
            .into_iter()
            .flat_map(|expr| expr.build_transform())
            .collect()
    }

    /// Starts a transformation for the field at the given dot-separated path.
    ///
    /// # Arguments
    /// * `field_name`: The dot-separated path to the field.
    ///
    /// # Returns
    /// A [`FirestoreTransformFieldExpr`] on which the concrete transformation
    /// (increment, maximum, server value, ...) can be selected.
    #[inline]
    pub fn field<S>(&self, field_name: S) -> FirestoreTransformFieldExpr
    where
        S: AsRef<str>,
    {
        FirestoreTransformFieldExpr::new(String::from(field_name.as_ref()))
    }
}
/// A trait for types that can be converted into a [`FirestoreFieldTransform`].
///
/// This is used by [`FirestoreTransformBuilder::fields()`] to allow various ways
/// of defining transformations, including optional ones.
pub trait FirestoreTransformExpr {
    /// Builds the [`FirestoreFieldTransform`].
    /// Returns `None` if the expression represents an empty or no-op transform.
    /// Consumes `self`, so a given expression can be built at most once.
    fn build_transform(self) -> Option<FirestoreFieldTransform>;
}
/// Represents a specific field targeted for a transformation.
///
/// This struct provides methods to define the actual transformation to be applied
/// to the field (e.g., increment, set to server value).
pub struct FirestoreTransformFieldExpr {
    // Dot-separated path of the targeted field within the document.
    field_name: String,
}
impl FirestoreTransformFieldExpr {
/// Creates a new `FirestoreTransformFieldExpr` for the given field name.
pub(crate) fn new(field_name: String) -> Self {
Self { field_name }
}
/// Specifies an "increment" transformation.
///
/// Atomically increments the numeric value of the field by the given value.
/// The value must be an integer or a double.
///
/// # Arguments
/// * `value`: The value to increment by, convertible to [`FirestoreValue`].
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn increment<V>(self, value: V) -> Option<FirestoreFieldTransform>
where
V: Into<FirestoreValue>,
{
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::Increment(value.into()),
))
}
/// Specifies a "maximum" transformation.
///
/// Atomically sets the field to the maximum of its current value and the given value.
///
/// # Arguments
/// * `value`: The value to compare with, convertible to [`FirestoreValue`].
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn maximum<V>(self, value: V) -> Option<FirestoreFieldTransform>
where
V: Into<FirestoreValue>,
{
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::Maximum(value.into()),
))
}
/// Specifies a "minimum" transformation.
///
/// Atomically sets the field to the minimum of its current value and the given value.
///
/// # Arguments
/// * `value`: The value to compare with, convertible to [`FirestoreValue`].
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn minimum<V>(self, value: V) -> Option<FirestoreFieldTransform>
where
V: Into<FirestoreValue>,
{
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::Minimum(value.into()),
))
}
/// Specifies a "set to server value" transformation.
///
/// Sets the field to a server-generated value, most commonly the request timestamp.
///
/// # Arguments
/// * `value`: The [`FirestoreTransformServerValue`] to set (e.g., `RequestTime`).
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn server_value(
self,
value: FirestoreTransformServerValue,
) -> Option<FirestoreFieldTransform> {
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::SetToServerValue(value),
))
}
/// Specifies an "append missing elements" transformation for an array field.
///
/// Atomically adds elements to the end of an array field, but only if they are
/// not already present in the array.
///
/// # Arguments
/// * `values`: An iterator of items convertible to [`FirestoreValue`] to append.
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn append_missing_elements<I>(self, values: I) -> Option<FirestoreFieldTransform>
where
I: IntoIterator,
I::Item: Into<FirestoreValue>,
{
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::AppendMissingElements(
values.into_iter().map(|m| m.into()).collect(),
),
))
}
/// Specifies a "remove all from array" transformation for an array field.
///
/// Atomically removes all instances of the given elements from an array field.
///
/// # Arguments
/// * `values`: An iterator of items convertible to [`FirestoreValue`] to remove.
///
/// # Returns
/// An `Option<FirestoreFieldTransform>` representing this transformation.
#[inline]
pub fn remove_all_from_array<I>(self, values: I) -> Option<FirestoreFieldTransform>
where
I: IntoIterator,
I::Item: Into<FirestoreValue>,
{
Some(FirestoreFieldTransform::new(
self.field_name,
FirestoreFieldTransformType::RemoveAllFromArray(
values.into_iter().map(|m| m.into()).collect(),
),
))
}
}
// A fully-built transform is trivially its own expression.
impl FirestoreTransformExpr for FirestoreFieldTransform {
    #[inline]
    fn build_transform(self) -> Option<FirestoreFieldTransform> {
        Some(self)
    }
}
// Permits passing `Option<...>` expressions to `fields(...)`;
// `None` entries simply produce no transform and are filtered out.
impl<F> FirestoreTransformExpr for Option<F>
where
    F: FirestoreTransformExpr,
{
    #[inline]
    fn build_transform(self) -> Option<FirestoreFieldTransform> {
        match self {
            Some(inner) => inner.build_transform(),
            None => None,
        }
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/fluent_api/tests/mockdb.rs | src/fluent_api/tests/mockdb.rs | use crate::*;
use async_trait::async_trait;
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use gcloud_sdk::google::firestore::v1::{Document, ListenResponse};
use serde::{Deserialize, Serialize};
// Stateless stand-in database used by fluent-API unit tests. Every trait
// method implemented for it below panics via `unreachable!()` if invoked:
// the tests only exercise builder construction, never execution.
#[derive(Clone)]
pub struct MockDatabase;
// Mock: query operations are declared only to satisfy the trait; none of
// them may actually be called by the fluent-API tests.
#[allow(unused)]
#[async_trait]
impl FirestoreQuerySupport for MockDatabase {
    async fn query_doc(&self, _params: FirestoreQueryParams) -> FirestoreResult<Vec<Document>> {
        unreachable!()
    }
    async fn stream_query_doc<'b>(
        &self,
        _params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>> {
        unreachable!()
    }
    async fn stream_query_doc_with_errors<'b>(
        &self,
        _params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        unreachable!()
    }
    async fn stream_query_doc_with_metadata<'b>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<FirestoreDocument>>>>
    {
        unreachable!()
    }
    async fn query_obj<T>(&self, _params: FirestoreQueryParams) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        unreachable!()
    }
    async fn stream_query_obj<'b, T>(
        &self,
        _params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>,
        T: 'b,
    {
        unreachable!()
    }
    async fn stream_query_obj_with_errors<'b, T>(
        &self,
        _params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        unreachable!()
    }
    async fn stream_query_obj_with_metadata<'b, T>(
        &self,
        params: FirestoreQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreWithMetadata<T>>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        unreachable!()
    }
    fn stream_partition_cursors_with_errors(
        &self,
        params: FirestorePartitionQueryParams,
    ) -> BoxFuture<FirestoreResult<PeekableBoxStream<FirestoreResult<FirestoreQueryCursor>>>> {
        unreachable!()
    }
    async fn stream_partition_query_doc_with_errors(
        &self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<FirestoreResult<(FirestorePartition, Document)>>> {
        unreachable!()
    }
    async fn stream_partition_query_obj_with_errors<'a, T>(
        &'a self,
        parallelism: usize,
        partition_params: FirestorePartitionQueryParams,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(FirestorePartition, T)>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'a,
    {
        unreachable!()
    }
}
// Mock: create operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreCreateSupport for MockDatabase {
    async fn create_doc<S>(
        &self,
        collection_id: &str,
        document_id: Option<S>,
        input_doc: Document,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Document>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn create_doc_at<S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: Option<S>,
        input_doc: Document,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Document>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn create_obj<I, O, S>(
        &self,
        collection_id: &str,
        document_id: Option<S>,
        obj: &I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<O>
    where
        I: Serialize + Sync + Send,
        for<'de> O: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn create_obj_at<I, O, S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: Option<S>,
        obj: &I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<O>
    where
        I: Serialize + Sync + Send,
        for<'de> O: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
}
// Mock: update operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreUpdateSupport for MockDatabase {
    async fn update_obj<I, O, S>(
        &self,
        collection_id: &str,
        document_id: S,
        obj: &I,
        update_only: Option<Vec<String>>,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<O>
    where
        I: Serialize + Sync + Send,
        for<'de> O: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn update_obj_at<I, O, S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        obj: &I,
        update_only: Option<Vec<String>>,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<O>
    where
        I: Serialize + Sync + Send,
        for<'de> O: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn update_doc(
        &self,
        collection_id: &str,
        firestore_doc: Document,
        update_only: Option<Vec<String>>,
        return_only_fields: Option<Vec<String>>,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<Document> {
        unreachable!()
    }
}
// Mock: delete operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreDeleteSupport for MockDatabase {
    async fn delete_by_id<S>(
        &self,
        collection_id: &str,
        document_id: S,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<()>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn delete_by_id_at<S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        precondition: Option<FirestoreWritePrecondition>,
    ) -> FirestoreResult<()>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
}
// Mock: listing operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreListingSupport for MockDatabase {
    async fn list_doc(
        &self,
        params: FirestoreListDocParams,
    ) -> FirestoreResult<FirestoreListDocResult> {
        unreachable!()
    }
    async fn stream_list_doc<'b>(
        &self,
        params: FirestoreListDocParams,
    ) -> FirestoreResult<BoxStream<'b, Document>> {
        unreachable!()
    }
    async fn stream_list_doc_with_errors<'b>(
        &self,
        params: FirestoreListDocParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        unreachable!()
    }
    async fn stream_list_obj<'b, T>(
        &self,
        params: FirestoreListDocParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de> + 'b,
    {
        unreachable!()
    }
    async fn stream_list_obj_with_errors<'b, T>(
        &self,
        params: FirestoreListDocParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de> + 'b,
    {
        unreachable!()
    }
    async fn list_collection_ids(
        &self,
        params: FirestoreListCollectionIdsParams,
    ) -> FirestoreResult<FirestoreListCollectionIdsResult> {
        unreachable!()
    }
    async fn stream_list_collection_ids_with_errors(
        &self,
        params: FirestoreListCollectionIdsParams,
    ) -> FirestoreResult<BoxStream<FirestoreResult<String>>> {
        unreachable!()
    }
    async fn stream_list_collection_ids(
        &self,
        params: FirestoreListCollectionIdsParams,
    ) -> FirestoreResult<BoxStream<String>> {
        unreachable!()
    }
}
// Mock: get-by-id and batch-get operations — present to satisfy the trait,
// never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreGetByIdSupport for MockDatabase {
    async fn get_doc<S>(
        &self,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Document>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_doc_at<S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Document>
    where
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_obj<T, S>(&self, collection_id: &str, document_id: S) -> FirestoreResult<T>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_obj_at<T, S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
    ) -> FirestoreResult<T>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_obj_at_return_fields<T, S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<T>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_obj_if_exists<T, S>(
        &self,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Option<T>>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn get_obj_at_if_exists<T, S>(
        &self,
        parent: &str,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<Option<T>>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_docs_at<S, I>(
        &self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_objects<'a, T, S, I>(
        &'a self,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
    where
        for<'de> T: Deserialize<'de> + 'a,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_docs_at_with_errors<S, I>(
        &self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_objects_with_errors<'a, T, S, I>(
        &'a self,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
    where
        for<'de> T: Deserialize<'de> + Send + 'a,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_docs<S, I>(
        &self,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<(String, Option<Document>)>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_docs_with_errors<S, I>(
        &self,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<FirestoreResult<(String, Option<Document>)>>>
    where
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_objects_at<'a, T, S, I>(
        &'a self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, (String, Option<T>)>>
    where
        for<'de> T: Deserialize<'de> + Send + 'a,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn batch_stream_get_objects_at_with_errors<'a, T, S, I>(
        &'a self,
        parent: &str,
        collection_id: &str,
        document_ids: I,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<T>)>>>
    where
        for<'de> T: Deserialize<'de> + Send,
        S: AsRef<str> + Send,
        I: IntoIterator<Item = S> + Send,
    {
        unreachable!()
    }
    async fn get_obj_return_fields<T, S>(
        &self,
        collection_id: &str,
        document_id: S,
        return_only_fields: Option<Vec<String>>,
    ) -> FirestoreResult<T>
    where
        for<'de> T: Deserialize<'de>,
        S: AsRef<str> + Send,
    {
        unreachable!()
    }
}
// Mock: listen operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreListenSupport for MockDatabase {
    async fn listen_doc_changes<'a, 'b>(
        &'a self,
        targets: Vec<FirestoreListenerTargetParams>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<ListenResponse>>> {
        unreachable!()
    }
}
// Mock: aggregated-query operations — present to satisfy the trait, never invoked.
#[allow(unused)]
#[async_trait]
impl FirestoreAggregatedQuerySupport for MockDatabase {
    async fn aggregated_query_doc(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<Document>> {
        unreachable!()
    }
    async fn stream_aggregated_query_doc<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, Document>> {
        unreachable!()
    }
    async fn stream_aggregated_query_doc_with_errors<'b>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<Document>>> {
        unreachable!()
    }
    async fn aggregated_query_obj<T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<Vec<T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        unreachable!()
    }
    async fn stream_aggregated_query_obj<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, T>>
    where
        for<'de> T: Deserialize<'de>,
    {
        unreachable!()
    }
    async fn stream_aggregated_query_obj_with_errors<'b, T>(
        &self,
        params: FirestoreAggregatedQueryParams,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<T>>>
    where
        for<'de> T: Deserialize<'de>,
        T: Send + 'b,
    {
        unreachable!()
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/transform_tests.rs | tests/transform_tests.rs | use crate::common::setup;
use firestore::*;
mod common;
// Integration test: verifies that an `append_missing_elements` field transform
// applied inside a transaction updates a nested array field (whose key `123`
// must be back-tick-escaped in the field path). Requires a live Firestore
// connection configured by `common::setup()`.
#[tokio::test]
async fn crud_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    const TEST_COLLECTION_NAME: &str = "integration-test-transform";
    let db = setup().await?;
    // Clean up any leftover document from a previous run.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id("test-t0")
        .execute()
        .await?;
    // Seed a document with a nested map: { bar: { "123": ["inner-value"] } }.
    db.fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id("test-t0")
        .document(FirestoreDb::serialize_map_to_doc(
            "",
            [(
                "bar",
                FirestoreValue::from_map([("123", ["inner-value"].into())]),
            )],
        )?)
        .execute()
        .await?;
    // Apply the array-append transform inside a transaction.
    let mut transaction = db.begin_transaction().await?;
    db.fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .document_id("test-t0")
        .transforms(|t| t.fields([t.field("bar.`123`").append_missing_elements(["987654321"])]))
        .only_transform()
        .add_to_transaction(&mut transaction)?;
    transaction.commit().await?;
    // Read back and check that the new element was appended.
    let doc_returned = db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .one("test-t0")
        .await?;
    assert_eq!(
        doc_returned.map(|d| d.fields),
        Some(
            FirestoreDb::serialize_map_to_doc(
                "",
                [(
                    "bar",
                    FirestoreValue::from_map([("123", ["inner-value", "987654321"].into())]),
                )],
            )?
            .fields
        )
    );
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/create-option-tests.rs | tests/create-option-tests.rs | use crate::common::setup;
use serde::{Deserialize, Serialize};
mod common;
use firestore::*;
// Fixture type: exercises round-tripping of an `Option<String>` field.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_string: Option<String>,
}
// Integration test: verifies that updating only a selected field
// (`some_string`) to `None` persists a null/absent value and that the stored
// document deserializes back to the updated object. Requires a live Firestore
// connection configured by `common::setup()`.
#[tokio::test]
async fn crud_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    const TEST_COLLECTION_NAME: &str = "integration-test-options";
    let db = setup().await?;
    let my_struct1 = MyTestStructure {
        some_id: "test-0".to_string(),
        some_string: Some("some_string".to_string()),
    };
    // Clean up any leftover document from a previous run.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .execute()
        .await?;
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .object(&my_struct1)
        .execute()
        .await?;
    assert_eq!(object_returned, my_struct1);
    // Update only `some_string`, clearing it to None.
    let object_updated: MyTestStructure = db
        .fluent()
        .update()
        .fields(paths!(MyTestStructure::{some_string}))
        .in_col(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .object(&MyTestStructure {
            some_string: None,
            ..my_struct1.clone()
        })
        .execute()
        .await?;
    assert_eq!(
        object_updated,
        MyTestStructure {
            some_string: None,
            ..my_struct1.clone()
        }
    );
    // Fetch again and confirm the stored state matches the update result.
    let find_it_again: Option<MyTestStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one(&my_struct1.some_id)
        .await?;
    assert_eq!(Some(object_updated), find_it_again);
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/macro_path_test.rs | tests/macro_path_test.rs | #[test]
fn test_ambiguous_path_macro() {
struct MyTestStructure {
some_id: String,
some_num: u64,
}
assert_eq!(firestore::path!(MyTestStructure::some_id), "some_id");
assert_eq!(
firestore::paths!(MyTestStructure::{some_id, some_num}),
vec!["some_id".to_string(), "some_num".to_string()]
);
assert_eq!(
firestore::path_camel_case!(MyTestStructure::some_id),
"someId"
);
assert_eq!(
firestore::paths_camel_case!(MyTestStructure::{some_id, some_num}),
vec!["someId".to_string(), "someNum".to_string()]
);
}
// Decoy module: exports macros with the same names as the `firestore` path
// macros. If the test above accidentally resolved to one of these local
// macros instead of the fully-qualified `firestore::` ones, it would panic
// via `unreachable!()`.
mod struct_path {
    #[macro_export]
    macro_rules! path {
        () => {
            unreachable!()
        };
    }
    #[macro_export]
    macro_rules! paths {
        ($($x:tt)*) => {{
            unreachable!()
        }};
    }
    #[macro_export]
    macro_rules! path_camel_case {
        ($($x:tt)*) => {{
            unreachable!()
        }};
    }
    #[macro_export]
    macro_rules! paths_camel_case {
        ($($x:tt)*) => {{
            unreachable!()
        }};
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/caching_persistent_test.rs | tests/caching_persistent_test.rs | use crate::common::{eventually_async, populate_collection, setup};
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tokio::time::sleep;
mod common;
use firestore::*;
// Fixture type persisted into the cache-test collections.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_num: u64,
    some_string: String,
}
// Integration test for the persistent (redb-backed) cache: one collection is
// configured with no preload and one with full preload, then cache-only and
// read-through reads, change propagation via the listener, and cached query
// filtering/ordering are verified. Requires a live Firestore connection
// configured by `common::setup()`.
#[tokio::test]
async fn precondition_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db = setup().await?;
    const TEST_COLLECTION_NAME_NO_PRELOAD: &'static str =
        "integration-test-caching-persistent-no-preload";
    const TEST_COLLECTION_NAME_PRELOAD: &'static str =
        "integration-test-caching-persistent-preload";
    // Populate both collections with 10 documents each (test-0 .. test-9).
    populate_collection(
        &db,
        TEST_COLLECTION_NAME_NO_PRELOAD,
        10,
        |i| MyTestStructure {
            some_id: format!("test-{}", i),
            some_num: i as u64,
            some_string: format!("Test value {}", i),
        },
        |ms| ms.some_id.clone(),
    )
    .await?;
    populate_collection(
        &db,
        TEST_COLLECTION_NAME_PRELOAD,
        10,
        |i| MyTestStructure {
            some_id: format!("test-{}", i),
            some_num: i as u64,
            some_string: format!("Test value {}", i),
        },
        |ms| ms.some_id.clone(),
    )
    .await?;
    // Give the server a moment to settle before the cache snapshots state.
    sleep(Duration::from_secs(1)).await;
    let temp_state_dir = tempfile::tempdir()?;
    let temp_db_dir = tempfile::tempdir()?;
    // Build the persistent cache: target 1000 without preload, 1001 with
    // full preload of all documents.
    let mut cache = FirestoreCache::new(
        "example-persistent-cache".into(),
        &db,
        FirestorePersistentCacheBackend::with_options(
            FirestoreCacheConfiguration::new()
                .add_collection_config(
                    &db,
                    FirestoreCacheCollectionConfiguration::new(
                        TEST_COLLECTION_NAME_NO_PRELOAD,
                        FirestoreListenerTarget::new(1000),
                        FirestoreCacheCollectionLoadMode::PreloadNone,
                    ),
                )
                .add_collection_config(
                    &db,
                    FirestoreCacheCollectionConfiguration::new(
                        TEST_COLLECTION_NAME_PRELOAD,
                        FirestoreListenerTarget::new(1001),
                        FirestoreCacheCollectionLoadMode::PreloadAllDocs,
                    ),
                ),
            temp_db_dir.keep().join("redb"),
        )?,
        FirestoreTempFilesListenStateStorage::with_temp_dir(temp_state_dir.keep()),
    )
    .await?;
    cache.load().await?;
    // Cache-only read misses in the non-preloaded collection...
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj()
        .one("test-0")
        .await?;
    assert!(my_struct.is_none());
    // ...but hits in the preloaded one.
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_PRELOAD)
        .obj()
        .one("test-0")
        .await?;
    assert!(my_struct.is_some());
    // A read-through fetch should populate the cache for subsequent
    // cache-only reads.
    db.read_through_cache(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj::<MyTestStructure>()
        .one("test-1")
        .await?;
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj()
        .one("test-1")
        .await?;
    assert!(my_struct.is_some());
    // Listing from cache: only the single read-through doc vs. all 10 preloaded.
    let cached_db = db.read_cached_only(&cache);
    let all_items_stream = cached_db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj::<MyTestStructure>()
        .stream_all_with_errors()
        .await?;
    let all_items = all_items_stream.try_collect::<Vec<_>>().await?;
    assert_eq!(all_items.len(), 1);
    let all_items_stream = cached_db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME_PRELOAD)
        .obj::<MyTestStructure>()
        .stream_all_with_errors()
        .await?;
    let all_items = all_items_stream.try_collect::<Vec<_>>().await?;
    assert_eq!(all_items.len(), 10);
    // Mutate a document server-side and wait for the cache listener to
    // propagate the change into the cache.
    db.fluent()
        .update()
        .fields(paths!(MyTestStructure::some_string))
        .in_col(TEST_COLLECTION_NAME_PRELOAD)
        .document_id("test-2")
        .object(&MyTestStructure {
            some_id: "test-2".to_string(),
            some_num: 2,
            some_string: "updated".to_string(),
        })
        .execute::<()>()
        .await?;
    let cached_db = db.read_cached_only(&cache);
    assert!(
        eventually_async(10, Duration::from_millis(500), move || {
            let cached_db = cached_db.clone();
            async move {
                let my_struct: Option<MyTestStructure> = cached_db
                    .fluent()
                    .select()
                    .by_id_in(TEST_COLLECTION_NAME_PRELOAD)
                    .obj()
                    .one("test-2")
                    .await?;
                if let Some(my_struct) = my_struct {
                    return Ok(my_struct.some_string.as_str() == "updated");
                }
                Ok(false)
            }
        })
        .await?
    );
    // Filtered + ordered query evaluated against the cache: some_num >= 5,
    // descending, so 5 results starting at 9.
    let cached_db = db.read_cached_only(&cache);
    let queried = cached_db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME_PRELOAD)
        .filter(|q| {
            q.for_all([q
                .field(path!(MyTestStructure::some_num))
                .greater_than_or_equal(5)])
        })
        .order_by([(
            path!(MyTestStructure::some_num),
            FirestoreQueryDirection::Descending,
        )])
        .obj::<MyTestStructure>()
        .stream_query_with_errors()
        .await?;
    let queried_items = queried.try_collect::<Vec<_>>().await?;
    assert_eq!(queried_items.len(), 5);
    assert_eq!(queried_items.first().map(|d| d.some_num), Some(9));
    cache.shutdown().await?;
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/nested-collections-tests.rs | tests/nested-collections-tests.rs | use crate::common::setup;
use serde::{Deserialize, Serialize};
mod common;
// Fixture type stored in the top-level (parent) collection.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyParentStructure {
    some_id: String,
    some_string: String,
}
// Fixture type stored in a sub-collection under a parent document.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyChildStructure {
    some_id: String,
    another_string: String,
}
// Integration test: creates a parent document and a child document in a
// sub-collection under it (via `parent_path`), then reads both back by id.
// Requires a live Firestore connection configured by `common::setup()`.
#[tokio::test]
async fn crud_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    const TEST_PARENT_COLLECTION_NAME: &str = "integration-nested-test";
    const TEST_CHILD_COLLECTION_NAME: &str = "integration-test-childs";
    let db = setup().await?;
    let parent_struct = MyParentStructure {
        some_id: "test-parent".to_string(),
        some_string: "Test".to_string(),
    };
    // Remove if it already exist
    db.fluent()
        .delete()
        .from(TEST_PARENT_COLLECTION_NAME)
        .document_id(&parent_struct.some_id)
        .execute()
        .await?;
    // Creating a parent doc
    db.fluent()
        .insert()
        .into(TEST_PARENT_COLLECTION_NAME)
        .document_id(&parent_struct.some_id)
        .object(&parent_struct)
        .execute::<()>()
        .await?;
    // Creating a child doc
    let child_struct = MyChildStructure {
        some_id: "test-child".to_string(),
        another_string: "TestChild".to_string(),
    };
    // The doc path where we store our childs
    let parent_path = db.parent_path(TEST_PARENT_COLLECTION_NAME, &parent_struct.some_id)?;
    // Remove child doc if exists
    db.fluent()
        .delete()
        .from(TEST_CHILD_COLLECTION_NAME)
        .parent(&parent_path)
        .document_id(&child_struct.some_id)
        .execute()
        .await?;
    // Create a child doc
    db.fluent()
        .insert()
        .into(TEST_CHILD_COLLECTION_NAME)
        .document_id(&child_struct.some_id)
        .parent(&parent_path)
        .object(&child_struct)
        .execute::<()>()
        .await?;
    // Read the parent back from the top-level collection.
    let find_parent: Option<MyParentStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_PARENT_COLLECTION_NAME)
        .obj()
        .one(&parent_struct.some_id)
        .await?;
    assert_eq!(find_parent, Some(parent_struct));
    // Read the child back from the sub-collection under the parent path.
    let find_child: Option<MyChildStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_CHILD_COLLECTION_NAME)
        .parent(&parent_path)
        .obj()
        .one(&child_struct.some_id)
        .await?;
    assert_eq!(find_child, Some(child_struct));
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/transaction-tests.rs | tests/transaction-tests.rs | use crate::common::setup;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
mod common;
use firestore::*;
// Fixture type persisted during the transaction tests.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
}
// Integration test covering transactions: reads/updates under a transaction
// consistency selector, a conditional create/update with write preconditions,
// and `run_transaction` retry semantics for permanent vs. transient errors.
// Requires a live Firestore connection configured by `common::setup()`.
#[tokio::test]
async fn transaction_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db = setup().await?;
    const TEST_COLLECTION_NAME: &str = "integration-test-transactions";
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "Test".to_string(),
    };
    // Clean up any leftover document from a previous run.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // "Update" with Exists(false) precondition acts as a create.
    let object_created: MyTestStructure = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .precondition(FirestoreWritePrecondition::Exists(false))
        .document_id(&my_struct.some_id)
        .object(&my_struct.clone())
        .execute()
        .await?;
    assert_eq!(object_created, my_struct);
    // Read inside a transaction via a transaction consistency selector.
    {
        let transaction = db.begin_transaction().await?;
        let db = db.clone_with_consistency_selector(FirestoreConsistencySelector::Transaction(
            transaction.transaction_id().clone(),
        ));
        db.fluent()
            .select()
            .by_id_in(TEST_COLLECTION_NAME)
            .obj::<MyTestStructure>()
            .one(&my_struct.some_id)
            .await?;
        transaction.commit().await?;
    }
    // Update with Exists(true) precondition inside a transaction context.
    {
        let transaction = db.begin_transaction().await?;
        let db = db.clone_with_consistency_selector(FirestoreConsistencySelector::Transaction(
            transaction.transaction_id().clone(),
        ));
        let object_updated: MyTestStructure = db
            .fluent()
            .update()
            .in_col(TEST_COLLECTION_NAME)
            .precondition(FirestoreWritePrecondition::Exists(true))
            .document_id(&my_struct.some_id)
            .object(&my_struct.clone())
            .execute()
            .await?;
        transaction.commit().await?;
        assert_eq!(object_updated, my_struct);
    }
    // Handling permanent errors
    {
        let res: FirestoreResult<()> = db
            .run_transaction(|_db, _tx| {
                Box::pin(async move {
                    //Test returning an error
                    Err(backoff::Error::Permanent(common::CustomUserError::new(
                        "test error",
                    )))
                })
            })
            .await;
        assert!(res.is_err());
    }
    // Handling transient errors
    {
        // The closure fails twice, then succeeds on the third attempt;
        // `run_transaction` is expected to retry transient errors.
        let counter = Arc::new(AtomicUsize::new(1));
        let res: FirestoreResult<()> = db
            .run_transaction(|_db, _tx| {
                let counter = counter.fetch_add(1, Ordering::Relaxed);
                Box::pin(async move {
                    if counter > 2 {
                        return Ok(());
                    }
                    //Test returning an error
                    Err(backoff::Error::Transient {
                        err: common::CustomUserError::new("test error"),
                        retry_after: None,
                    })
                })
            })
            .await;
        assert!(res.is_ok());
    }
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/complex-structure-serialize.rs | tests/complex-structure-serialize.rs | use approx::relative_eq;
use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
mod common;
use crate::common::setup;
/// Newtype wrapper used to check that tuple structs round-trip through serde.
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct Test1(pub u8);
/// Nested newtype (newtype-of-newtype) serialization check.
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct Test1i(pub Test1);
/// Plain struct with an optional field, used both standalone and inside enums.
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub struct Test2 {
    some_id: String,
    some_bool: Option<bool>,
}
/// Covers the serde enum representations: unit, newtype, tuple and struct variants.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub enum TestEnum {
    TestChoice,
    TestWithParam(String),
    TestWithMultipleParams(String, String),
    TestWithStruct(Test2),
}
// Example structure to play with: exercises every serde helper this crate
// ships (timestamp, optional timestamp, null-timestamp and null serializers)
// plus nested structs, enums and tuples.
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    some_num: u64,
    // Stored as a native Firestore Timestamp instead of an RFC3339 string.
    #[serde(with = "firestore::serialize_as_timestamp")]
    created_at: DateTime<Utc>,
    // Optional timestamp: the field is omitted entirely when `None`.
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_optional_timestamp")]
    updated_at: Option<DateTime<Utc>>,
    // Optional timestamp serialized as an explicit Firestore null when `None`.
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_null_timestamp")]
    updated_at_as_null: Option<DateTime<Utc>>,
    test1: Test1,
    test1i: Test1i,
    test11: Option<Test1>,
    test2: Option<Test2>,
    test3: Vec<Test2>,
    test4: TestEnum,
    test5: (TestEnum, TestEnum),
    test6: TestEnum,
    test7: TestEnum,
    // `serialize_as_null` writes a Firestore null instead of dropping the field.
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_null")]
    test_null1: Option<String>,
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_null")]
    test_null2: Option<String>,
}
/// Round-trip check for both floating point widths (only `PartialEq`:
/// floats have no total equality).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyFloatStructure {
    some_f32: f32,
    some_f64: f64,
}
/// Round-trip check for the Firestore vector value type.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyVectorStructure {
    some_vec: FirestoreVector,
}
/// End-to-end serialization test for the complex structure above:
/// create, partial update, read back, plus float and vector round-trips.
/// NOTE(review): a test fn named `main` is unconventional — consider renaming.
#[tokio::test]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db = setup().await?;
    const TEST_COLLECTION_NAME: &str = "integration-test-complex";
    // Fixture covering every field/representation the struct declares.
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "Test".to_string(),
        some_num: 41,
        created_at: Utc::now(),
        updated_at: None,
        updated_at_as_null: None,
        test1: Test1(1),
        test1i: Test1i(Test1(1)),
        test11: Some(Test1(1)),
        test2: Some(Test2 {
            some_id: "test-1".to_string(),
            some_bool: Some(true),
        }),
        test3: vec![
            Test2 {
                some_id: "test-2".to_string(),
                some_bool: Some(false),
            },
            Test2 {
                some_id: "test-2".to_string(),
                some_bool: Some(true),
            },
        ],
        test4: TestEnum::TestChoice,
        test5: (TestEnum::TestChoice, TestEnum::TestChoice),
        test6: TestEnum::TestWithMultipleParams("ss".to_string(), "ss".to_string()),
        test7: TestEnum::TestWithStruct(Test2 {
            some_id: "test-2".to_string(),
            some_bool: Some(true),
        }),
        test_null1: None,
        test_null2: Some("Test".to_string()),
    };
    // Remove if it already exist
    db.delete_by_id(TEST_COLLECTION_NAME, &my_struct.some_id, None)
        .await?;
    // Let's insert some data
    db.create_obj::<_, (), _>(
        TEST_COLLECTION_NAME,
        Some(&my_struct.some_id),
        &my_struct,
        None,
    )
    .await?;
    let to_update = MyTestStructure {
        some_num: my_struct.some_num + 1,
        some_string: "updated-value".to_string(),
        ..my_struct.clone()
    };
    // Update some field in it — only the two listed paths are written.
    let updated_obj: MyTestStructure = db
        .update_obj(
            TEST_COLLECTION_NAME,
            &my_struct.some_id,
            &to_update,
            Some(paths!(MyTestStructure::{
                some_num,
                some_string
            })),
            None,
            None,
        )
        .await?;
    // Get object by id
    let find_it_again: MyTestStructure =
        db.get_obj(TEST_COLLECTION_NAME, &my_struct.some_id).await?;
    assert_eq!(updated_obj.some_num, to_update.some_num);
    // NOTE(review): label says `updated_obj.some_num` but prints
    // `to_update.some_num`; harmless since the assert above proves they match.
    println!("updated_obj.some_num: {:?}", to_update.some_num);
    assert_eq!(updated_obj.some_string, to_update.some_string);
    assert_eq!(updated_obj.test1, to_update.test1);
    assert_eq!(updated_obj.some_num, find_it_again.some_num);
    assert_eq!(updated_obj.some_string, find_it_again.some_string);
    assert_eq!(updated_obj.test1, find_it_again.test1);
    // Float round-trip — compared with approx's relative_eq, not ==.
    let my_float_structure = MyFloatStructure {
        some_f32: 42.0,
        some_f64: 42.0,
    };
    let my_float_structure_returned: MyFloatStructure = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .document_id("test-floats")
        .object(&my_float_structure)
        .execute()
        .await?;
    assert!(relative_eq!(
        my_float_structure_returned.some_f32,
        my_float_structure.some_f32
    ));
    assert!(relative_eq!(
        my_float_structure_returned.some_f64,
        my_float_structure.some_f64
    ));
    // Firestore vector value round-trip.
    let my_vector_structure = MyVectorStructure {
        some_vec: FirestoreVector::new(vec![1.0, 2.0, 3.0]),
    };
    let my_vector_structure_returned: MyVectorStructure = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .document_id("test-vectors")
        .object(&my_vector_structure)
        .execute()
        .await?;
    assert_eq!(
        my_vector_structure.some_vec,
        my_vector_structure_returned.some_vec
    );
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/query-integration-tests.rs | tests/query-integration-tests.rs | use crate::common::setup;
use chrono::prelude::*;
use futures::stream::BoxStream;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
mod common;
use firestore::*;
/// Document shape for the query tests; fields chosen so that filters
/// and orderings can discriminate between the two fixture documents.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
/// Query integration test: composite AND (`for_all`) and OR (`for_any`)
/// filters with descending ordering over two fixture documents.
/// NOTE(review): the fn name `crud_tests` is a misnomer in this file.
#[tokio::test]
async fn crud_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    const TEST_COLLECTION_NAME: &str = "integration-test-query";
    let db = setup().await?;
    let my_struct1 = MyTestStructure {
        some_id: "test-0".to_string(),
        some_string: "some_string".to_string(),
        one_more_string: "one_more_string".to_string(),
        some_num: 42,
        created_at: Utc::now(),
    };
    let my_struct2 = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "some_string-1".to_string(),
        one_more_string: "one_more_string-1".to_string(),
        some_num: 17,
        created_at: Utc::now(),
    };
    // Clean slate for both fixture documents.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .execute()
        .await?;
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct2.some_id)
        .execute()
        .await?;
    db.fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .object(&my_struct1)
        .execute::<()>()
        .await?;
    db.fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct2.some_id)
        .object(&my_struct2)
        .execute::<()>()
        .await?;
    // AND filter: only my_struct1 has some_string == "some_string".
    let object_stream: BoxStream<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            q.for_all([
                q.field(path!(MyTestStructure::some_num)).is_not_null(),
                q.field(path!(MyTestStructure::some_string))
                    .eq("some_string"),
            ])
        })
        .order_by([(
            path!(MyTestStructure::some_num),
            FirestoreQueryDirection::Descending,
        )])
        .obj()
        .stream_query()
        .await?;
    let objects_as_vec1: Vec<MyTestStructure> = object_stream.collect().await;
    assert_eq!(objects_as_vec1, vec![my_struct1.clone()]);
    // OR filter: matches both; descending some_num puts struct1 (42) first.
    let object_stream: BoxStream<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            q.for_any([
                q.field(path!(MyTestStructure::some_string))
                    .eq("some_string"),
                q.field(path!(MyTestStructure::some_string))
                    .eq("some_string-1"),
            ])
        })
        .order_by([(
            path!(MyTestStructure::some_num),
            FirestoreQueryDirection::Descending,
        )])
        .obj()
        .stream_query()
        .await?;
    let objects_as_vec2: Vec<MyTestStructure> = object_stream.collect().await;
    assert_eq!(objects_as_vec2, vec![my_struct1, my_struct2]);
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/caching_memory_test.rs | tests/caching_memory_test.rs | use crate::common::{eventually_async, populate_collection, setup};
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use std::time::Duration;
mod common;
use firestore::*;
/// Simple document shape stored in the two cached test collections.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,     // document id
    some_string: String, // payload field updated to test cache propagation
}
/// Exercises the in-memory cache backend against two collections: one
/// configured with `PreloadNone` (cache fills via read-through) and one
/// with `PreloadAllDocs` (cache is warm after `load()`), then verifies
/// that listener updates eventually reach the cache.
/// NOTE(review): the name `precondition_tests` is a leftover misnomer —
/// nothing here uses write preconditions.
#[tokio::test]
async fn precondition_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db = setup().await?;
    // `'static` is implied for string-literal consts; the explicit
    // lifetime was redundant (clippy::redundant_static_lifetimes).
    const TEST_COLLECTION_NAME_NO_PRELOAD: &str = "integration-test-caching-mem-no-preload";
    const TEST_COLLECTION_NAME_PRELOAD: &str = "integration-test-caching-mem-preload";
    // Seed both collections with 10 documents each.
    populate_collection(
        &db,
        TEST_COLLECTION_NAME_NO_PRELOAD,
        10,
        |i| MyTestStructure {
            some_id: format!("test-{}", i),
            some_string: format!("Test value {}", i),
        },
        |ms| ms.some_id.clone(),
    )
    .await?;
    populate_collection(
        &db,
        TEST_COLLECTION_NAME_PRELOAD,
        10,
        |i| MyTestStructure {
            some_id: format!("test-{}", i),
            some_string: format!("Test value {}", i),
        },
        |ms| ms.some_id.clone(),
    )
    .await?;
    // One cache instance serving both collections with different load modes.
    let mut cache = FirestoreCache::new(
        "example-mem-cache".into(),
        &db,
        FirestoreMemoryCacheBackend::new(
            FirestoreCacheConfiguration::new()
                .add_collection_config(
                    &db,
                    FirestoreCacheCollectionConfiguration::new(
                        TEST_COLLECTION_NAME_NO_PRELOAD,
                        FirestoreListenerTarget::new(1000),
                        FirestoreCacheCollectionLoadMode::PreloadNone,
                    ),
                )
                .add_collection_config(
                    &db,
                    FirestoreCacheCollectionConfiguration::new(
                        TEST_COLLECTION_NAME_PRELOAD,
                        FirestoreListenerTarget::new(1001),
                        FirestoreCacheCollectionLoadMode::PreloadAllDocs,
                    ),
                ),
        )?,
        FirestoreMemListenStateStorage::new(),
    )
    .await?;
    cache.load().await?;
    // Cached-only read misses: the no-preload collection starts empty.
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj()
        .one("test-0")
        .await?;
    assert!(my_struct.is_none());
    // ...while the preloaded collection hits immediately.
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_PRELOAD)
        .obj()
        .one("test-0")
        .await?;
    assert!(my_struct.is_some());
    // Read-through populates the no-preload cache as a side effect...
    db.read_through_cache(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj::<MyTestStructure>()
        .one("test-1")
        .await?;
    // ...so a subsequent cached-only read of the same id now hits.
    let my_struct: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj()
        .one("test-1")
        .await?;
    assert!(my_struct.is_some());
    let cached_db = db.read_cached_only(&cache);
    // Listing from cache: only the single read-through doc is present here...
    let all_items_stream = cached_db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME_NO_PRELOAD)
        .obj::<MyTestStructure>()
        .stream_all_with_errors()
        .await?;
    let all_items = all_items_stream.try_collect::<Vec<_>>().await?;
    assert_eq!(all_items.len(), 1);
    // ...while the preloaded collection lists all 10 documents.
    let all_items_stream = cached_db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME_PRELOAD)
        .obj::<MyTestStructure>()
        .stream_all_with_errors()
        .await?;
    let all_items = all_items_stream.try_collect::<Vec<_>>().await?;
    assert_eq!(all_items.len(), 10);
    // Write directly (bypassing the cache) and wait for the listener to
    // propagate the change into the cached copy.
    db.fluent()
        .update()
        .fields(paths!(MyTestStructure::some_string))
        .in_col(TEST_COLLECTION_NAME_PRELOAD)
        .document_id("test-2")
        .object(&MyTestStructure {
            some_id: "test-2".to_string(),
            some_string: "updated".to_string(),
        })
        .execute::<()>()
        .await?;
    let cached_db = db.read_cached_only(&cache);
    assert!(
        eventually_async(10, Duration::from_millis(500), move || {
            let cached_db = cached_db.clone();
            async move {
                let my_struct: Option<MyTestStructure> = cached_db
                    .fluent()
                    .select()
                    .by_id_in(TEST_COLLECTION_NAME_PRELOAD)
                    .obj()
                    .one("test-2")
                    .await?;
                if let Some(my_struct) = my_struct {
                    return Ok(my_struct.some_string.as_str() == "updated");
                }
                Ok(false)
            }
        })
        .await?
    );
    cache.shutdown().await?;
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/update-precondition-test.rs | tests/update-precondition-test.rs | use crate::common::setup;
use serde::{Deserialize, Serialize};
mod common;
use firestore::*;
/// Document shape used to verify write preconditions (Exists true/false).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,     // document id
    some_string: String, // payload field
}
/// Verifies `FirestoreWritePrecondition::Exists`:
/// update-with-Exists(true) fails on a missing doc, Exists(false) acts
/// as create-only, Exists(true) then succeeds on the existing doc.
#[tokio::test]
async fn precondition_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let db = setup().await?;
    const TEST_COLLECTION_NAME: &str = "integration-test-precondition";
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "Test".to_string(),
    };
    // Ensure the document does not exist before the precondition checks.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // Exists(true) against a missing document must be rejected.
    let should_fail: FirestoreResult<()> = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .precondition(FirestoreWritePrecondition::Exists(true))
        .document_id(&my_struct.some_id)
        .object(&MyTestStructure {
            some_string: "created-value".to_string(),
            ..my_struct.clone()
        })
        .execute()
        .await;
    assert!(should_fail.is_err());
    // Exists(false) acts as "create only" and succeeds.
    let object_created: MyTestStructure = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .precondition(FirestoreWritePrecondition::Exists(false))
        .document_id(&my_struct.some_id)
        .object(&my_struct.clone())
        .execute()
        .await?;
    assert_eq!(object_created, my_struct);
    // Now the document exists, so Exists(true) succeeds.
    let object_updated: MyTestStructure = db
        .fluent()
        .update()
        .in_col(TEST_COLLECTION_NAME)
        .precondition(FirestoreWritePrecondition::Exists(true))
        .document_id(&my_struct.some_id)
        .object(&my_struct.clone())
        .execute()
        .await?;
    assert_eq!(object_updated, my_struct);
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/crud-integration-tests.rs | tests/crud-integration-tests.rs | use crate::common::setup;
use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
mod common;
/// Document shape for the CRUD tests; `some_num`/`one_more_string` are the
/// fields targeted by the partial (masked) update below.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
/// CRUD integration test: insert, field-masked update, read-back by id,
/// and batched multi-get via `batch_stream_get_objects`.
#[tokio::test]
async fn crud_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    const TEST_COLLECTION_NAME: &str = "integration-test-crud";
    let db = setup().await?;
    let my_struct1 = MyTestStructure {
        some_id: "test-0".to_string(),
        some_string: "some_string".to_string(),
        one_more_string: "one_more_string".to_string(),
        some_num: 42,
        created_at: Utc::now(),
    };
    let my_struct2 = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "some_string-1".to_string(),
        one_more_string: "one_more_string-1".to_string(),
        some_num: 17,
        created_at: Utc::now(),
    };
    // Clean slate for both fixture documents.
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .execute()
        .await?;
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct2.some_id)
        .execute()
        .await?;
    // Insert returning the created object...
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .object(&my_struct1)
        .execute()
        .await?;
    // ...and insert discarding the response (`execute::<()>`).
    db.fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct2.some_id)
        .object(&my_struct2)
        .execute::<()>()
        .await?;
    assert_eq!(object_returned, my_struct1);
    // Masked update: only `some_num` and `one_more_string` are written,
    // so the modified `some_string` must NOT reach the server.
    let object_updated: MyTestStructure = db
        .fluent()
        .update()
        .fields(paths!(MyTestStructure::{some_num, one_more_string}))
        .in_col(TEST_COLLECTION_NAME)
        .document_id(&my_struct1.some_id)
        .object(&MyTestStructure {
            some_num: my_struct1.some_num + 1,
            some_string: "should-not-change".to_string(),
            one_more_string: "updated-value".to_string(),
            ..my_struct1.clone()
        })
        .execute()
        .await?;
    assert_eq!(
        object_updated,
        MyTestStructure {
            some_num: my_struct1.some_num + 1,
            one_more_string: "updated-value".to_string(),
            ..my_struct1.clone()
        }
    );
    let find_it_again: Option<MyTestStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one(&my_struct1.some_id)
        .await?;
    assert_eq!(Some(object_updated.clone()), find_it_again);
    // Batched get preserves request order; drop the id half of each pair.
    let get_both_stream: BoxStream<Option<MyTestStructure>> = Box::pin(
        db.batch_stream_get_objects(
            TEST_COLLECTION_NAME,
            [&my_struct1.some_id, &my_struct2.some_id],
            None,
        )
        .await?
        .map(|(_, obj)| obj),
    );
    let get_both_stream_vec: Vec<Option<MyTestStructure>> = get_both_stream.collect().await;
    assert_eq!(vec![find_it_again, Some(my_struct2)], get_both_stream_vec);
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/tests/common/mod.rs | tests/common/mod.rs | use firestore::*;
use futures::future::BoxFuture;
use futures::FutureExt;
use serde::{Deserialize, Serialize};
use std::future::Future;
use std::ops::Mul;
use tokio::time::sleep;
use tracing::*;
/// Reads the environment variable `name`, turning a lookup failure into a
/// human-readable `"<name>: <cause>"` error string.
#[allow(dead_code)]
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
/// Shared test bootstrap: installs a debug-level tracing subscriber and
/// connects to the project named by the `GCP_PROJECT` env var.
/// NOTE(review): `set_global_default` errors if called twice in one
/// process; each integration-test binary must call `setup` only once.
#[allow(dead_code)]
pub async fn setup() -> Result<FirestoreDb, Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let filter =
        tracing_subscriber::EnvFilter::builder().parse("info,firestore=debug,gcloud_sdk=debug")?;
    let subscriber = tracing_subscriber::fmt().with_env_filter(filter).finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("GCP_PROJECT")?).await?;
    Ok(db)
}
/// Fills `collection_name` with `max_items` documents in a single batch
/// write. `sf` builds the i-th object; `df` derives its document id.
/// Uses `update()` semantics, so existing documents are overwritten.
#[allow(dead_code)]
pub async fn populate_collection<T, DF>(
    db: &FirestoreDb,
    collection_name: &str,
    max_items: usize,
    sf: fn(usize) -> T,
    df: DF,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>>
where
    T: Serialize + Send + Sync + 'static,
    for<'de> T: Deserialize<'de>,
    DF: Fn(&T) -> String,
{
    info!(collection_name, "Populating collection.");
    let batch_writer = db.create_simple_batch_writer().await?;
    let mut current_batch = batch_writer.new_batch();
    for i in 0..max_items {
        let my_struct = sf(i);
        // Let's insert some data — writes are queued, not sent yet.
        db.fluent()
            .update()
            .in_col(collection_name)
            .document_id(df(&my_struct).as_str())
            .object(&my_struct)
            .add_to_batch(&mut current_batch)?;
    }
    // Flush the whole batch in one request.
    current_batch.write().await?;
    Ok(())
}
/// Polls the async predicate `f` until it returns `true`, retrying up to
/// `max_retries` times. Returns `Ok(false)` if the condition never held;
/// any error from `f` aborts immediately via `?`.
/// Between attempts it sleeps `sleep_duration * retries^2` — a quadratic
/// backoff, so later attempts wait considerably longer.
#[allow(dead_code)]
pub fn eventually_async<'a, F, FN>(
    max_retries: u32,
    sleep_duration: std::time::Duration,
    f: FN,
) -> BoxFuture<'a, Result<bool, Box<dyn std::error::Error + Send + Sync>>>
where
    FN: Fn() -> F + Send + Sync + 'a,
    F: Future<Output = Result<bool, Box<dyn std::error::Error + Send + Sync>>> + Send + 'a,
{
    async move {
        let mut retries = 0;
        loop {
            if f().await? {
                return Ok(true);
            }
            retries += 1;
            if retries > max_retries {
                return Ok(false);
            }
            // Quadratic backoff: Duration * (retries^2).
            sleep(sleep_duration.mul(retries * retries)).await;
        }
    }
    .boxed()
}
/// Simple user-defined error type used by the tests to exercise error
/// propagation through `run_transaction` and the backoff retry machinery.
#[derive(Debug)]
pub struct CustomUserError {
    details: String, // message shown via `Display`
}

#[allow(dead_code)]
impl CustomUserError {
    /// Creates a new error carrying `msg` as its display text.
    pub fn new(msg: &str) -> CustomUserError {
        CustomUserError {
            details: msg.to_string(),
        }
    }
}

impl std::fmt::Display for CustomUserError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}", self.details)
    }
}

// `Error::description` has been deprecated since Rust 1.42 in favour of
// `Display`; the trait's default implementations are sufficient here.
impl std::error::Error for CustomUserError {}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/reference.rs | examples/reference.rs | use firestore::*;
use serde::{Deserialize, Serialize};
/// Looks up the environment variable `name`; a missing or non-Unicode
/// value becomes an `Err` of the form `"<name>: <cause>"`.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with: stores a Firestore document reference
// (`FirestoreReference`) pointing at another document.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_ref: FirestoreReference, // full document path reference
}
/// Example: store a document containing a `FirestoreReference`, read it
/// back, split the reference into its parts, and fetch the referenced doc.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-reference";
    // The reference points at a document in a *different* collection.
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_ref: db.parent_path("test-latlng", "test-1")?.into(),
    };
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // A fluent version of create document/object
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .object(&my_struct)
        .execute()
        .await?;
    println!("Created: {object_returned:?}");
    // Query our data
    let objects1: Vec<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .obj()
        .query()
        .await?;
    println!("Now in the list: {objects1:?}");
    // Decompose the stored reference into (parent, collection, doc id).
    // NOTE(review): `unwrap()` is fine for an example, but panics if the
    // query unexpectedly returned nothing.
    let (parent_path, collection_name, document_id) = objects1
        .first()
        .unwrap()
        .some_ref
        .split(db.get_documents_path());
    println!("Document ID: {document_id}");
    println!("Collection name: {collection_name:?}");
    println!("Parent Path: {parent_path:?}");
    // Read by reference
    let object_returned: Option<FirestoreDocument> = db
        .fluent()
        .select()
        .by_id_in(&collection_name)
        .one(document_id)
        .await?;
    println!("Object by reference: {object_returned:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/caching_memory_collections.rs | examples/caching_memory_collections.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
/// Reads the environment variable `name`, mapping a failure into a
/// `"<name>: <cause>"` error string.
/// Uses inline format-args captures for consistency with the other
/// examples/tests in this repository (the positional `{}` form was the
/// only outlier).
pub fn config_env_var(name: &str) -> Result<String, String> {
    std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with in the cached collection.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
/// Example: in-memory read-through cache over a single collection
/// (`PreloadNone` mode) — demonstrates cached-only misses, read-through
/// fills, batched gets, listing and querying from the cache.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    // NOTE(review): the explicit `'static` is redundant on a string-literal
    // const (clippy::redundant_static_lifetimes) — `&str` would suffice.
    const TEST_COLLECTION_NAME: &'static str = "test-caching";
    // Cache with one collection config; PreloadNone means it starts empty
    // and fills only via read-through.
    let mut cache = FirestoreCache::new(
        "example-mem-cache".into(),
        &db,
        FirestoreMemoryCacheBackend::new(
            FirestoreCacheConfiguration::new().add_collection_config(
                &db,
                FirestoreCacheCollectionConfiguration::new(
                    TEST_COLLECTION_NAME,
                    FirestoreListenerTarget::new(1000),
                    FirestoreCacheCollectionLoadMode::PreloadNone,
                ),
            ),
        )?,
        FirestoreMemListenStateStorage::new(),
    )
    .await?;
    cache.load().await?;
    // Populate the source collection once (skipped on subsequent runs).
    if db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .one("test-0")
        .await?
        .is_none()
    {
        println!("Populating a test collection");
        let batch_writer = db.create_simple_batch_writer().await?;
        let mut current_batch = batch_writer.new_batch();
        for i in 0..500 {
            let my_struct = MyTestStructure {
                some_id: format!("test-{}", i),
                some_string: "Test".to_string(),
                one_more_string: "Test2".to_string(),
                some_num: i,
                created_at: Utc::now(),
            };
            // Let's insert some data
            db.fluent()
                .update()
                .in_col(TEST_COLLECTION_NAME)
                .document_id(&my_struct.some_id)
                .object(&my_struct)
                .add_to_batch(&mut current_batch)?;
        }
        current_batch.write().await?;
    }
    println!("Getting by id only from cache - won't exist");
    let my_struct0: Option<MyTestStructure> = db
        .read_cached_only(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one("test-1")
        .await?;
    println!("{:?}", my_struct0);
    println!("Getting by id");
    // Read-through: fetched from Firestore and stored into the cache.
    let my_struct1: Option<MyTestStructure> = db
        .read_through_cache(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one("test-1")
        .await?;
    println!("{:?}", my_struct1);
    println!("Getting by id from cache now");
    // Same read again — now served from the cache.
    let my_struct2: Option<MyTestStructure> = db
        .read_through_cache(&cache)
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one("test-1")
        .await?;
    println!("{:?}", my_struct2);
    println!("Getting batch by ids");
    let cached_db = db.read_through_cache(&cache);
    let my_struct1_stream: BoxStream<FirestoreResult<(String, Option<MyTestStructure>)>> =
        cached_db
            .fluent()
            .select()
            .by_id_in(TEST_COLLECTION_NAME)
            .obj()
            .batch_with_errors(["test-1", "test-2"])
            .await?;
    let my_structs1 = my_struct1_stream.try_collect::<Vec<_>>().await?;
    println!("{:?}", my_structs1);
    // Now from cache
    let my_struct2_stream: BoxStream<FirestoreResult<(String, Option<MyTestStructure>)>> =
        cached_db
            .fluent()
            .select()
            .by_id_in(TEST_COLLECTION_NAME)
            .obj()
            .batch_with_errors(["test-1", "test-2"])
            .await?;
    let my_structs2 = my_struct2_stream.try_collect::<Vec<_>>().await?;
    println!("{:?}", my_structs2);
    // List from cache
    let cached_db = db.read_cached_only(&cache);
    let all_items_stream = cached_db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME)
        .obj::<MyTestStructure>()
        .stream_all_with_errors()
        .await?;
    let listed_items = all_items_stream.try_collect::<Vec<_>>().await?;
    println!("{:?}", listed_items.len());
    // Query from cache
    let all_items_stream = cached_db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            q.for_all(
                q.field(path!(MyTestStructure::some_num))
                    .greater_than_or_equal(2),
            )
        })
        .obj::<MyTestStructure>()
        .stream_query_with_errors()
        .await?;
    let queried_items = all_items_stream.try_collect::<Vec<_>>().await?;
    println!("{:?}", queried_items.len());
    cache.shutdown().await?;
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/explain-query.rs | examples/explain-query.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
/// Fetches the environment variable `name`, converting any lookup error
/// into the string `"<name>: <cause>"`.
pub fn config_env_var(name: &str) -> Result<String, String> {
    std::env::var(name).map_err(|cause| format!("{name}: {cause}"))
}
// Example structure to play with
// Example structure used to populate and explain-query the collection.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
/// Example: run a query with `explain()` so each result carries query-plan
/// metadata (`FirestoreWithMetadata`) alongside the deserialized object.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-query";
    // Populate the collection once (skipped on subsequent runs).
    if db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .one("test-0")
        .await?
        .is_none()
    {
        println!("Populating a test collection");
        let batch_writer = db.create_simple_batch_writer().await?;
        let mut current_batch = batch_writer.new_batch();
        for i in 0..500 {
            let my_struct = MyTestStructure {
                some_id: format!("test-{i}"),
                some_string: "Test".to_string(),
                one_more_string: "Test2".to_string(),
                some_num: i,
                created_at: Utc::now(),
            };
            // Let's insert some data
            db.fluent()
                .update()
                .in_col(TEST_COLLECTION_NAME)
                .document_id(&my_struct.some_id)
                .object(&my_struct)
                .add_to_batch(&mut current_batch)?;
        }
        current_batch.write().await?;
    }
    println!("Explain querying for a test collection as a stream using Fluent API");
    // Query as a stream our data
    let object_stream: BoxStream<FirestoreResult<FirestoreWithMetadata<MyTestStructure>>> = db
        .fluent()
        .select()
        .fields(
            paths!(MyTestStructure::{some_id, some_num, some_string, one_more_string, created_at}),
        )
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            // `for_all` skips `None` entries, so the Option-based clause
            // below is included only when the value is present.
            q.for_all([
                q.field(path!(MyTestStructure::some_num)).is_not_null(),
                q.field(path!(MyTestStructure::some_string)).eq("Test"),
                Some("Test2")
                    .and_then(|value| q.field(path!(MyTestStructure::one_more_string)).eq(value)),
            ])
        })
        .order_by([(
            path!(MyTestStructure::some_num),
            FirestoreQueryDirection::Descending,
        )])
        .explain()
        //.explain_with_options(FirestoreExplainOptions::new().with_analyze(true)) or with analyze
        .obj()
        .stream_query_with_metadata()
        .await?;
    let as_vec: Vec<FirestoreWithMetadata<MyTestStructure>> = object_stream.try_collect().await?;
    println!("{as_vec:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/group-query.rs | examples/group-query.rs | use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
/// Returns the value of the environment variable `name`, or a formatted
/// `"<name>: <cause>"` error string when the variable cannot be read.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(v) => Ok(v),
        Err(e) => Err(format!("{name}: {e}")),
    }
}
/// Root-collection document; each parent owns a sub-collection of children.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyParentStructure {
    some_id: String,
    some_string: String,
}
/// Document stored in the child sub-collection under a parent document.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyChildStructure {
    some_id: String,
    another_string: String,
}
/// Example: nested collections plus a collection-group query
/// (`all_descendants`) across every `test-childs` sub-collection.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_PARENT_COLLECTION_NAME: &str = "nested-test";
    const TEST_CHILD_COLLECTION_NAME: &str = "test-childs";
    println!("Populating parent doc/collection");
    for parent_idx in 0..5 {
        let parent_struct = MyParentStructure {
            some_id: format!("test-parent-{parent_idx}"),
            some_string: "Test".to_string(),
        };
        // Remove if it already exist
        db.fluent()
            .delete()
            .from(TEST_PARENT_COLLECTION_NAME)
            .document_id(&parent_struct.some_id)
            .execute()
            .await?;
        db.fluent()
            .insert()
            .into(TEST_PARENT_COLLECTION_NAME)
            .document_id(&parent_struct.some_id)
            .object(&parent_struct)
            .execute::<()>()
            .await?;
        // The doc path where we store our childs. This only depends on the
        // parent, so it is computed once per parent rather than per child
        // (it was a loop-invariant recomputed on every inner iteration).
        let parent_path = db.parent_path(TEST_PARENT_COLLECTION_NAME, &parent_struct.some_id)?;
        for child_idx in 0..3 {
            // Creating a child doc
            let child_struct = MyChildStructure {
                some_id: format!("test-parent{parent_idx}-child-{child_idx}"),
                another_string: "TestChild".to_string(),
            };
            // Remove child doc if exists
            db.fluent()
                .delete()
                .from(TEST_CHILD_COLLECTION_NAME)
                .parent(&parent_path)
                .document_id(&child_struct.some_id)
                .execute()
                .await?;
            db.fluent()
                .insert()
                .into(TEST_CHILD_COLLECTION_NAME)
                .document_id(&child_struct.some_id)
                .parent(&parent_path)
                .object(&child_struct)
                .execute::<()>()
                .await?;
        }
    }
    println!("Query children");
    // Collection-group query: matches children under *all* parents.
    let mut objs_stream: BoxStream<MyChildStructure> = db
        .fluent()
        .select()
        .from(TEST_CHILD_COLLECTION_NAME)
        //.parent(db.parent_path(TEST_PARENT_COLLECTION_NAME, "test-parent-0")) // if you need to search for only one root you need do disable with_all_descendants below
        .all_descendants()
        .filter(|q| {
            q.for_all([q
                .field(path!(MyChildStructure::another_string))
                .eq("TestChild")])
        })
        .obj()
        .stream_query()
        .await?;
    while let Some(object) = objs_stream.next().await {
        println!("Object in stream: {object:?}");
    }
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/list-docs.rs | examples/list-docs.rs | use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
/// Reads environment variable `name`; on failure the error string is
/// `"<name>: <cause>"`.
pub fn config_env_var(name: &str) -> Result<String, String> {
    std::env::var(name).map_err(|error| format!("{name}: {error}"))
}
// Example structure to play with.
// Serialized to/from a Firestore document via serde; `some_id` is also used
// as the document id when inserting below.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
}
// Populates a test collection with ten documents and then lists them as a
// stream, demonstrating automatic pagination via a small page size.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance. NOTE: the original code called `.clone()` on the
    // freshly created db, which was a redundant copy (clippy::redundant_clone).
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    println!("Populating a test collection");
    for i in 0..10 {
        let my_struct = MyTestStructure {
            some_id: format!("test-{i}"),
            some_string: "Test".to_string(),
            one_more_string: "Test2".to_string(),
            some_num: 42,
        };
        // Remove if it already exist
        db.fluent()
            .delete()
            .from(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .execute()
            .await?;
        // Let's insert some data
        db.fluent()
            .insert()
            .into(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .object(&my_struct)
            .execute::<()>()
            .await?;
    }
    println!("Listing objects as a stream");
    // Query as a stream our data
    let objs_stream: BoxStream<MyTestStructure> = db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME)
        .page_size(3) // This is decreased just to show an example of automatic pagination, in the real usage please use bigger figure or don't specify it (default is 100)
        .order_by([(
            path!(MyTestStructure::some_id),
            FirestoreQueryDirection::Descending,
        )])
        .obj()
        .stream_all()
        .await?;
    let as_vec: Vec<MyTestStructure> = objs_stream.collect().await;
    println!("{as_vec:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/list-collections.rs | examples/list-collections.rs | use firestore::*;
use futures::TryStreamExt;
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Lists all collection ids of the project's database as a stream and prints
// them once collected.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance. NOTE: the original code called `.clone()` on the
    // freshly created db, which was a redundant copy (clippy::redundant_clone).
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    println!("Listing collections as a stream");
    let collections_stream = db
        .fluent()
        .list()
        .collections()
        .stream_all_with_errors()
        .await?;
    // `try_collect` short-circuits on the first stream error
    let collections: Vec<String> = collections_stream.try_collect().await?;
    println!("Collections: {collections:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/nested_collections.rs | examples/nested_collections.rs | use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Parent document structure for the nested-collections example.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyParentStructure {
    some_id: String, // used as the parent document id
    some_string: String,
}
// Child document structure stored in a sub-collection under a parent doc.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyChildStructure {
    some_id: String, // used as the child document id
    another_string: String,
}
// Demonstrates CRUD on a nested (sub-)collection: creates a parent document,
// then a child document under it, and finally lists and queries the children.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_PARENT_COLLECTION_NAME: &str = "nested-test";
    const TEST_CHILD_COLLECTION_NAME: &str = "test-childs";
    println!("Creating a parent doc/collection");
    let parent_struct = MyParentStructure {
        some_id: "test-parent".to_string(),
        some_string: "Test".to_string(),
    };
    // Remove if it already exist
    db.fluent()
        .delete()
        .from(TEST_PARENT_COLLECTION_NAME)
        .document_id(&parent_struct.some_id)
        .execute()
        .await?;
    // Creating a parent doc
    db.fluent()
        .insert()
        .into(TEST_PARENT_COLLECTION_NAME)
        .document_id(&parent_struct.some_id)
        .object(&parent_struct)
        .execute::<()>()
        .await?;
    // Creating a child doc
    let child_struct = MyChildStructure {
        some_id: "test-child".to_string(),
        another_string: "TestChild".to_string(),
    };
    // The doc path where we store our childs
    // (note: `parent_struct.some_id` is moved here; `parent_struct` is not
    // used again afterwards)
    let parent_path = db.parent_path(TEST_PARENT_COLLECTION_NAME, parent_struct.some_id)?;
    // Remove child doc if exists
    db.fluent()
        .delete()
        .from(TEST_CHILD_COLLECTION_NAME)
        .parent(&parent_path)
        .document_id(&child_struct.some_id)
        .execute()
        .await?;
    // Create a child doc
    db.fluent()
        .insert()
        .into(TEST_CHILD_COLLECTION_NAME)
        .document_id(&child_struct.some_id)
        .parent(&parent_path)
        .object(&child_struct)
        .execute::<()>()
        .await?;
    println!("Listing all children");
    // List every document of the sub-collection (no filter)
    let list_stream: BoxStream<MyChildStructure> = db
        .fluent()
        .list()
        .from(TEST_CHILD_COLLECTION_NAME)
        .parent(&parent_path)
        .obj()
        .stream_all()
        .await?;
    let as_vec: Vec<MyChildStructure> = list_stream.collect().await;
    println!("{as_vec:?}");
    println!("Querying in children");
    // Query the sub-collection with a field equality filter
    let query_stream: BoxStream<MyChildStructure> = db
        .fluent()
        .select()
        .from(TEST_CHILD_COLLECTION_NAME)
        .parent(&parent_path)
        .filter(|q| {
            q.for_all([q
                .field(path!(MyChildStructure::another_string))
                .eq("TestChild")])
        })
        .obj()
        .stream_query()
        .await?;
    let as_vec: Vec<MyChildStructure> = query_stream.collect().await;
    println!("{as_vec:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/transactions.rs | examples/transactions.rs | use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with; `some_id` is also used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
}
// Populates a collection via a simple batch writer, then performs an update
// and a delete atomically inside one transaction, and prints the result.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    println!("Populating a test collection");
    let batch_writer = db.create_simple_batch_writer().await?;
    let mut current_batch = batch_writer.new_batch();
    for i in 0..10 {
        let my_struct = MyTestStructure {
            some_id: format!("test-{i}"),
            some_string: "Test".to_string(),
        };
        // Let's insert some data (queued in the batch, written below)
        db.fluent()
            .update()
            .in_col(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .object(&my_struct)
            .add_to_batch(&mut current_batch)?;
    }
    current_batch.write().await?;
    println!("Transaction update/delete on collection");
    let mut transaction = db.begin_transaction().await?;
    // Update only the `some_string` field of test-0 inside the transaction
    db.fluent()
        .update()
        .fields(paths!(MyTestStructure::{
            some_string
        }))
        .in_col(TEST_COLLECTION_NAME)
        .document_id("test-0")
        .object(&MyTestStructure {
            some_id: "test-0".to_string(),
            some_string: "UpdatedTest".to_string(),
        })
        .add_to_transaction(&mut transaction)?;
    // Delete test-5 in the same transaction; both ops commit atomically
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id("test-5")
        .add_to_transaction(&mut transaction)?;
    transaction.commit().await?;
    println!("Listing objects as a stream with updated test-0 and removed test-5");
    // Query as a stream our data
    let objs: Vec<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .order_by([(
            path!(MyTestStructure::some_id),
            FirestoreQueryDirection::Descending,
        )])
        .obj()
        .query()
        .await?;
    objs.iter().for_each(|obj| {
        println!("Object in stream: {obj:?}");
    });
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/listen-changes.rs | examples/listen-changes.rs | use chrono::prelude::*;
use firestore::*;
use serde::{Deserialize, Serialize};
use std::io::Read;
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with.
// `doc_id` is populated from the Firestore system field `_firestore_id`
// (via the serde alias) when documents are deserialized.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    #[serde(alias = "_firestore_id")]
    doc_id: Option<String>,
    some_id: String,
    some_string: String,
    some_num: u64,
    #[serde(with = "firestore::serialize_as_timestamp")]
    created_at: DateTime<Utc>,
}
const TEST_COLLECTION_NAME: &str = "test-listen";
// The IDs of targets - must be different for different listener targets/listeners in case you have many instances
const TEST_TARGET_ID_BY_QUERY: FirestoreListenerTarget = FirestoreListenerTarget::new(42_u32);
const TEST_TARGET_ID_BY_DOC_IDS: FirestoreListenerTarget = FirestoreListenerTarget::new(17_u32);
// Demonstrates change listeners: inserts a document, registers one listener
// target for a query and one for specific document ids, then prints change
// events until the user presses a key.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Propagate the connection error instead of panicking: main already
    // returns Result, so `?` replaces the original `.unwrap()`.
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    let mut listener = db
        .create_listener(
            FirestoreTempFilesListenStateStorage::new(), // or FirestoreMemListenStateStorage or your own implementation
        )
        .await?;
    let my_struct = MyTestStructure {
        doc_id: None,
        some_id: "test-1".to_string(),
        some_string: "test-str".to_string(),
        some_num: 42,
        created_at: Utc::now(),
    };
    // Insert with a server-generated document id; the returned object has
    // `doc_id` filled in via the `_firestore_id` alias.
    let new_doc: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .generate_document_id()
        .object(&my_struct)
        .execute()
        .await?;
    // Target 1: listen to the whole collection query
    db.fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .listen()
        .add_target(TEST_TARGET_ID_BY_QUERY, &mut listener)?;
    // Target 2: listen to a specific set of document ids
    db.fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .batch_listen([new_doc.doc_id.clone().expect("Doc must be created before")])
        .add_target(TEST_TARGET_ID_BY_DOC_IDS, &mut listener)?;
    listener
        .start(|event| async move {
            match event {
                FirestoreListenEvent::DocumentChange(ref doc_change) => {
                    println!("Doc changed: {doc_change:?}");
                    if let Some(doc) = &doc_change.document {
                        let obj: MyTestStructure =
                            FirestoreDb::deserialize_doc_to::<MyTestStructure>(doc)
                                .expect("Deserialized object");
                        println!("As object: {obj:?}");
                    }
                }
                _ => {
                    println!("Received a listen response event to handle: {event:?}");
                }
            }
            Ok(())
        })
        .await?;
    // Wait any input until we shutdown
    println!(
        "Waiting any other changes. Try firebase console to change in {} now yourself. New doc created id: {:?}",
        TEST_COLLECTION_NAME,new_doc.doc_id
    );
    std::io::stdin().read_exact(&mut [0u8; 1])?;
    listener.shutdown().await?;
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/generated-document-id.rs | examples/generated-document-id.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with.
// The `_firestore_*` serde aliases let Firestore system fields (document id,
// create time, update time) populate these Options on deserialization.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    #[serde(alias = "_firestore_id")]
    id: Option<String>, // server-generated document id
    #[serde(alias = "_firestore_created")]
    created_at: Option<DateTime<Utc>>,
    #[serde(alias = "_firestore_updated")]
    updated_at: Option<DateTime<Utc>>,
    test: Option<String>,
    some_string: String,
    one_more_string: String,
    some_num: u64,
}
// Inserts a document with a server-generated id and reads it back by that id.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    let my_struct = MyTestStructure {
        id: None,
        created_at: None,
        updated_at: None,
        test: Some("tsst".to_string()),
        some_string: "Test".to_string(),
        one_more_string: "Test2".to_string(),
        some_num: 41,
    };
    // `generate_document_id` lets the server pick the id; it comes back in
    // the `_firestore_id`-aliased `id` field of the returned object.
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .generate_document_id()
        .object(&my_struct)
        .execute()
        .await?;
    println!("Created {object_returned:?}");
    // Propagate a missing id as an error instead of panicking via `unwrap()`
    let generated_id = object_returned
        .id
        .ok_or("Insert response did not contain a generated document id")?;
    let object_read: Option<MyTestStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one(generated_id)
        .await?;
    println!("Read {object_read:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/dynamic_doc_level_crud.rs | examples/dynamic_doc_level_crud.rs | use chrono::prelude::*;
use firestore::*;
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Demonstrates document-level (schemaless) CRUD: builds Firestore documents
// from dynamic key/value maps instead of serde-derived structs.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    // Remove the doc if it already exists
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id("test-1")
        .execute()
        .await?;
    // Insert a document built from a dynamic map (empty document name:
    // the target path comes from collection + document_id above)
    let object_returned = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id("test-1")
        .document(FirestoreDb::serialize_map_to_doc(
            "",
            [
                ("some_id", "some-id-value".into()),
                ("some_string", "some-string-value".into()),
                ("one_more_string", "another-string-value".into()),
                ("some_num", 41.into()),
                (
                    "embedded_obj",
                    FirestoreValue::from_map([
                        ("inner_some_id", "inner-value".into()),
                        ("inner_some_string", "inner-value".into()),
                    ]),
                ),
                ("created_at", FirestoreTimestamp(Utc::now()).into()),
            ],
        )?)
        .execute()
        .await?;
    println!("Created {object_returned:?}");
    // Partial update: only the listed fields are written; the document path
    // is embedded in the serialized doc via `parent_path`
    let object_updated = db
        .fluent()
        .update()
        .fields(["some_num", "one_more_string"])
        .in_col(TEST_COLLECTION_NAME)
        .document(FirestoreDb::serialize_map_to_doc(
            db.parent_path(TEST_COLLECTION_NAME, "test-1")?,
            [
                ("one_more_string", "update-string".into()),
                ("some_num", 42.into()),
            ],
        )?)
        .execute()
        .await?;
    println!("Updated {object_updated:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/timestamp.rs | examples/timestamp.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with, showing the three ways to get Firestore
// native timestamp serialization for date/time fields.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    // Using a special attribute to indicate timestamp serialization for Firestore
    // (for serde_json it will be still the same, usually String serialization, so you can reuse the models)
    #[serde(with = "firestore::serialize_as_timestamp")]
    created_at: DateTime<Utc>,
    // Or you can use a wrapping type
    updated_at: Option<FirestoreTimestamp>,
    updated_at_always_none: Option<FirestoreTimestamp>,
    // Or one more attribute for optionals
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_optional_timestamp")]
    updated_at_attr: Option<DateTime<Utc>>,
    #[serde(default)]
    #[serde(with = "firestore::serialize_as_optional_timestamp")]
    updated_at_attr_always_none: Option<DateTime<Utc>>,
}
// Creates a document using timestamp-typed fields and queries it back with a
// timestamp comparison filter.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-ts1";
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        created_at: Utc::now(),
        updated_at: Some(Utc::now().into()),
        updated_at_always_none: None,
        updated_at_attr: Some(Utc::now()),
        updated_at_attr_always_none: None,
    };
    // Remove the doc if it already exists
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // A fluent version of create document/object
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .object(&my_struct)
        .execute()
        .await?;
    println!("Created: {object_returned:?}");
    // Query our data: created_at <= now
    let objects1: Vec<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            q.for_all([q
                .field(path!(MyTestStructure::created_at))
                .less_than_or_equal(
                    firestore::FirestoreTimestamp(Utc::now()), // Using the wrapping type to indicate serialization without attribute
                )])
        })
        .obj()
        .query()
        .await?;
    println!("Now in the list: {objects1:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/camel-case.rs | examples/camel-case.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with.
// `rename_all = "camelCase"` stores fields as e.g. `someId` in Firestore,
// which is why the *_camel_case path macros are used in `main`.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
// Populates a collection whose fields are stored camelCased and queries it
// using the `paths_camel_case!`/`path_camel_case!` macros.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-camel-case";
    println!("Populating a test collection");
    for i in 0..10 {
        let my_struct = MyTestStructure {
            some_id: format!("test-{i}"),
            some_string: "Test".to_string(),
            one_more_string: "Test2".to_string(),
            some_num: 42 - i,
            created_at: Utc::now(),
        };
        // Remove if it already exist
        db.fluent()
            .delete()
            .from(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .execute()
            .await?;
        // Let's insert some data
        db.fluent()
            .insert()
            .into(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .object(&my_struct)
            .execute::<()>()
            .await?;
    }
    println!("Querying a test collection as a stream using Fluent API");
    // Query as a stream our data; field projections and filters must use the
    // camelCased names to match the stored documents
    let object_stream: BoxStream<MyTestStructure> = db
        .fluent()
        .select()
        .fields(
            paths_camel_case!(MyTestStructure::{some_id, some_num, some_string, one_more_string, created_at}),
        )
        .from(TEST_COLLECTION_NAME)
        .filter(|q| {
            q.for_all([
                q.field(path_camel_case!(MyTestStructure::some_string)).eq("Test"),
            ])
        })
        .obj()
        .stream_query()
        .await?;
    let as_vec: Vec<MyTestStructure> = object_stream.collect().await;
    println!("{as_vec:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/crud.rs | examples/crud.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with; `some_id` is also used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
// Basic CRUD walkthrough: delete, insert, get-by-id, then a partial
// field update of a single document.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "Test".to_string(),
        one_more_string: "Test2".to_string(),
        some_num: 41,
        created_at: Utc::now(),
    };
    // Remove the doc if it already exists
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // A fluent version of create document/object
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .object(&my_struct)
        .execute()
        .await?;
    println!("Created {object_returned:?}");
    // Get by id
    let obj_by_id: Option<MyTestStructure> = db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one(&my_struct.some_id)
        .await?;
    println!("Get by id {obj_by_id:?}");
    // Partial update: only `some_num` and `one_more_string` are written
    let object_updated: MyTestStructure = db
        .fluent()
        .update()
        .fields(paths!(MyTestStructure::{some_num, one_more_string}))
        .in_col(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .object(&MyTestStructure {
            some_num: my_struct.some_num + 1,
            one_more_string: "updated-value".to_string(),
            ..my_struct.clone()
        })
        .execute()
        .await?;
    println!("Updated {object_updated:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/nearest-vector-query.rs | examples/nearest-vector-query.rs | use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with.
// `distance` is left None on insert; the find-nearest query can populate it
// via `with_distance_result_field`.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    some_vec: FirestoreVector, // embedding vector used for nearest-neighbour search
    distance: Option<f64>,
}
// Populates a collection with vector-valued documents (only once), then runs
// a find-nearest (vector similarity) query against it.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-query-vec";
    // Populate only when the collection has not been seeded yet
    if db
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .one("test-0")
        .await?
        .is_none()
    {
        println!("Populating a test collection");
        let batch_writer = db.create_simple_batch_writer().await?;
        let mut current_batch = batch_writer.new_batch();
        for i in 0..500 {
            let my_struct = MyTestStructure {
                some_id: format!("test-{i}"),
                some_string: "Test".to_string(),
                some_vec: vec![i as f64, (i * 10) as f64, (i * 20) as f64].into(),
                distance: None,
            };
            // Let's insert some data
            db.fluent()
                .update()
                .in_col(TEST_COLLECTION_NAME)
                .document_id(&my_struct.some_id)
                .object(&my_struct)
                .add_to_batch(&mut current_batch)?;
        }
        current_batch.write().await?;
    }
    println!("Show sample documents in the test collection");
    let as_vec: Vec<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .limit(3)
        .obj()
        .query()
        .await?;
    println!("Examples: {as_vec:?}");
    println!("Search for a test collection with a vector closest");
    // Nearest-neighbour query: 5 closest docs to the origin by Euclidean
    // distance; the computed distance lands in the `distance` field
    let as_vec: Vec<MyTestStructure> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .find_nearest_with_options(
            FirestoreFindNearestOptions::new(
                path!(MyTestStructure::some_vec),
                vec![0.0_f64, 0.0_f64, 0.0_f64].into(),
                FirestoreFindNearestDistanceMeasure::Euclidean,
                5,
            )
            .with_distance_result_field(path!(MyTestStructure::distance)),
        )
        .obj()
        .query()
        .await?;
    println!("Found: {as_vec:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/batch-write-streaming.rs | examples/batch-write-streaming.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Document payload for the streaming batch-write example; `some_id` is also
// used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    created_at: DateTime<Utc>,
}
// Streams 10000 document upserts through the streaming batch writer,
// flushing in batches of 100 while a separate task prints write responses.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-batch-write";
    const BATCH_SIZE: usize = 100;
    println!("Populating a test collection");
    let (batch_writer, mut batch_results_reader) = db.create_streaming_batch_writer().await?;
    // Drain write responses concurrently so the writer is never blocked
    let response_thread = tokio::spawn(async move {
        while let Ok(Some(response)) = batch_results_reader.try_next().await {
            println!("{response:?}");
        }
    });
    let mut current_batch = batch_writer.new_batch();
    let mut pending = 0usize;
    for idx in 0..10000 {
        let my_struct = MyTestStructure {
            some_id: format!("test-{idx}"),
            some_string: "Test".to_string(),
            created_at: Utc::now(),
        };
        db.fluent()
            .update()
            .in_col(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .object(&my_struct)
            .add_to_batch(&mut current_batch)?;
        pending += 1;
        // Flush once the batch is full. The previous `idx % 100 == 0` check
        // wrote a one-element batch at idx 0 and left the final partial batch
        // (documents 9901..9999) queued but never written before finish().
        if pending == BATCH_SIZE {
            current_batch.write().await?;
            current_batch = batch_writer.new_batch();
            pending = 0;
        }
    }
    // Flush any remaining partial batch before closing the writer
    if pending > 0 {
        current_batch.write().await?;
    }
    println!("Finishing...");
    batch_writer.finish().await;
    let _ = tokio::join!(response_thread);
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/database_id_option.rs | examples/database_id_option.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with; `some_id` is also used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
// Demonstrates connecting to a non-default database via
// `FirestoreDbOptions::with_database_id`, then a simple delete + insert.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance targeting the database named by DATABASE_ID
    let db = FirestoreDb::with_options(
        FirestoreDbOptions::new(config_env_var("PROJECT_ID")?)
            .with_database_id(config_env_var("DATABASE_ID")?),
    )
    .await?;
    const TEST_COLLECTION_NAME: &str = "test";
    let my_struct = MyTestStructure {
        some_id: "test-1".to_string(),
        some_string: "Test".to_string(),
        one_more_string: "Test2".to_string(),
        some_num: 41,
        created_at: Utc::now(),
    };
    // Remove the doc if it already exists
    db.fluent()
        .delete()
        .from(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .execute()
        .await?;
    // A fluent version of create document/object
    let object_returned: MyTestStructure = db
        .fluent()
        .insert()
        .into(TEST_COLLECTION_NAME)
        .document_id(&my_struct.some_id)
        .object(&my_struct)
        .execute()
        .await?;
    println!("Created {object_returned:?}");
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/partition-query.rs | examples/partition-query.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with; `some_id` is also used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
    one_more_string: String,
    some_num: u64,
    created_at: DateTime<Utc>,
}
// Demonstrates a partitioned query: the collection is split into partitions
// that are streamed in parallel, each result paired with its partition.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test-partitions";
    // One-off population step, kept for reference — uncomment to seed the
    // collection (partitioned queries only pay off on large collections):
    //println!("Populating a test collection");
    // for i in 0..40000 {
    //     let my_struct = MyTestStructure {
    //         some_id: format!("test-{}", i),
    //         some_string: "Test".to_string(),
    //         one_more_string: "Test2".to_string(),
    //         some_num: i,
    //         created_at: Utc::now(),
    //     };
    //
    //     if db
    //         .fluent()
    //         .select()
    //         .by_id_in(TEST_COLLECTION_NAME)
    //         .one(&my_struct.some_id)
    //         .await?
    //         .is_none()
    //     {
    //         // Let's insert some data
    //         db.fluent()
    //             .insert()
    //             .into(TEST_COLLECTION_NAME)
    //             .document_id(&my_struct.some_id)
    //             .object(&my_struct)
    //             .execute()
    //             .await?;
    //     }
    // }
    // Stream results from all partitions (2 at a time), each item tagged
    // with the partition it came from
    let partition_stream: BoxStream<FirestoreResult<(FirestorePartition, MyTestStructure)>> = db
        .fluent()
        .select()
        .from(TEST_COLLECTION_NAME)
        .obj()
        .partition_query()
        .parallelism(2)
        .page_size(10)
        .stream_partitions_with_errors()
        .await?;
    let as_vec: Vec<(FirestorePartition, MyTestStructure)> = partition_stream.try_collect().await?;
    println!("{}", as_vec.len());
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/consistency-selector.rs | examples/consistency-selector.rs | use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
/// Read the environment variable `name`, mapping a lookup failure into a
/// human-readable `"<name>: <error>"` message.
pub fn config_env_var(name: &str) -> Result<String, String> {
    match std::env::var(name) {
        Ok(value) => Ok(value),
        Err(err) => Err(format!("{name}: {err}")),
    }
}
// Example structure to play with; `some_id` is also used as the document id.
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
    some_id: String,
    some_string: String,
}
// Demonstrates a consistency selector: a read-only transaction is used to
// pin reads to a consistent snapshot via `clone_with_consistency_selector`.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logging with debug enabled
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter("firestore=debug")
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    // Create an instance
    let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
    const TEST_COLLECTION_NAME: &str = "test";
    println!("Populating a test collection");
    for i in 0..10 {
        let my_struct = MyTestStructure {
            some_id: format!("test-{i}"),
            some_string: "Test".to_string(),
        };
        // Remove if it already exist
        db.fluent()
            .delete()
            .from(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .execute()
            .await?;
        // Let's insert some data
        db.fluent()
            .insert()
            .into(TEST_COLLECTION_NAME)
            .document_id(&my_struct.some_id)
            .object(&my_struct)
            .execute::<()>()
            .await?;
    }
    println!("Read only transaction to read the state before changes");
    let transaction = db
        .begin_transaction_with_options(
            FirestoreTransactionOptions::new().with_mode(FirestoreTransactionMode::ReadOnly),
        )
        .await?;
    // Working with consistency selector for reading when necessary
    let cdb = db.clone_with_consistency_selector(FirestoreConsistencySelector::Transaction(
        transaction.transaction_id().clone(),
    ));
    let consistency_read_test: Option<MyTestStructure> = cdb
        .fluent()
        .select()
        .by_id_in(TEST_COLLECTION_NAME)
        .obj()
        .one("test-0")
        .await?;
    println!("The original one: {consistency_read_test:?}");
    transaction.commit().await?;
    // Fixed misleading message: unlike the transactions example this code
    // never updates test-0 or removes test-5, so don't claim it did.
    println!("Listing objects as a stream");
    // Query as a stream our data
    let mut objs_stream: BoxStream<MyTestStructure> = db
        .fluent()
        .list()
        .from(TEST_COLLECTION_NAME)
        .order_by([(
            path!(MyTestStructure::some_id),
            FirestoreQueryDirection::Descending,
        )])
        .obj()
        .stream_all()
        .await?;
    while let Some(object) = objs_stream.next().await {
        println!("Object in stream: {object:?}");
    }
    Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/latlng.rs | examples/latlng.rs | use firestore::*;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_latlng: FirestoreLatLng,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test-latlng";
let my_struct = MyTestStructure {
some_id: "test-1".to_string(),
some_latlng: FirestoreLatLng(FirestoreGeoPoint {
latitude: 1.0,
longitude: 2.0,
}),
};
db.fluent()
.delete()
.from(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.execute()
.await?;
// A fluent version of create document/object
let object_returned: MyTestStructure = db
.fluent()
.insert()
.into(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.execute()
.await?;
println!("Created: {object_returned:?}");
// Query our data
let objects1: Vec<MyTestStructure> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.obj()
.query()
.await?;
println!("Now in the list: {objects1:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/query.rs | examples/query.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test-query";
if db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.one("test-0")
.await?
.is_none()
{
println!("Populating a test collection");
let batch_writer = db.create_simple_batch_writer().await?;
let mut current_batch = batch_writer.new_batch();
for i in 0..500 {
let my_struct = MyTestStructure {
some_id: format!("test-{i}"),
some_string: "Test".to_string(),
one_more_string: "Test2".to_string(),
some_num: i,
created_at: Utc::now(),
};
// Let's insert some data
db.fluent()
.update()
.in_col(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.add_to_batch(&mut current_batch)?;
}
current_batch.write().await?;
}
println!("Querying a test collection as a stream using Fluent API");
// Simple query into vector
// Query as a stream our data
let as_vec: Vec<MyTestStructure> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.obj()
.query()
.await?;
println!("{as_vec:?}");
// Query as a stream our data with filters and ordering
let object_stream: BoxStream<FirestoreResult<MyTestStructure>> = db
.fluent()
.select()
.fields(
paths!(MyTestStructure::{some_id, some_num, some_string, one_more_string, created_at}),
)
.from(TEST_COLLECTION_NAME)
.filter(|q| {
q.for_all([
q.field(path!(MyTestStructure::some_num)).is_not_null(),
q.field(path!(MyTestStructure::some_string)).eq("Test"),
Some("Test2")
.and_then(|value| q.field(path!(MyTestStructure::one_more_string)).eq(value)),
])
})
.order_by([(
path!(MyTestStructure::some_num),
FirestoreQueryDirection::Descending,
)])
.obj()
.stream_query_with_errors()
.await?;
let as_vec: Vec<MyTestStructure> = object_stream.try_collect().await?;
println!("{as_vec:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/read-write-transactions.rs | examples/read-write-transactions.rs | use firestore::*;
use futures::stream::FuturesOrdered;
use futures::FutureExt;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
test_string: String,
}
const TEST_COLLECTION_NAME: &str = "test-rw-trans";
const TEST_DOCUMENT_ID: &str = "test_doc_id";
/// Creates a document with a counter set to 0 and then concurrently executes futures for `COUNT_ITERATIONS` iterations.
/// Finally, it reads the document again and verifies that the counter matches the expected number of iterations.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const COUNT_ITERATIONS: usize = 50;
println!("Creating initial document...");
// Remove if it already exists
db.fluent()
.delete()
.from(TEST_COLLECTION_NAME)
.document_id(TEST_DOCUMENT_ID)
.execute()
.await?;
// Let's insert some data
let my_struct = MyTestStructure {
test_string: String::new(),
};
db.fluent()
.insert()
.into(TEST_COLLECTION_NAME)
.document_id(TEST_DOCUMENT_ID)
.object(&my_struct)
.execute::<()>()
.await?;
println!("Running transactions...");
let mut futures = FuturesOrdered::new();
for _ in 0..COUNT_ITERATIONS {
futures.push_back(update_value(&db));
}
futures.collect::<Vec<_>>().await;
println!("Testing results...");
let test_structure: MyTestStructure = db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.one(TEST_DOCUMENT_ID)
.await?
.expect("Missing document");
assert_eq!(test_structure.test_string.len(), COUNT_ITERATIONS);
Ok(())
}
async fn update_value(db: &FirestoreDb) -> FirestoreResult<()> {
db.run_transaction(|db, transaction| {
async move {
let mut test_structure: MyTestStructure = db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.one(TEST_DOCUMENT_ID)
.await?
.expect("Missing document");
// Perform some kind of operation that depends on the state of the document
test_structure.test_string += "a";
db.fluent()
.update()
.fields(paths!(MyTestStructure::{
test_string
}))
.in_col(TEST_COLLECTION_NAME)
.document_id(TEST_DOCUMENT_ID)
.object(&test_structure)
.add_to_transaction(transaction)?;
Ok(())
}
.boxed()
})
.await?;
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/caching_persistent_collections.rs | examples/caching_persistent_collections.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{}: {}", name, e))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &'static str = "test-caching";
let mut cache = FirestoreCache::new(
"example-persistent-cache".into(),
&db,
FirestorePersistentCacheBackend::new(
FirestoreCacheConfiguration::new().add_collection_config(
&db,
FirestoreCacheCollectionConfiguration::new(
TEST_COLLECTION_NAME,
FirestoreListenerTarget::new(1000),
FirestoreCacheCollectionLoadMode::PreloadAllIfEmpty,
),
),
)?,
FirestoreTempFilesListenStateStorage::new(),
)
.await?;
cache.load().await?;
if db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.one("test-0")
.await?
.is_none()
{
println!("Populating a test collection");
let batch_writer = db.create_simple_batch_writer().await?;
let mut current_batch = batch_writer.new_batch();
for i in 0..500 {
let my_struct = MyTestStructure {
some_id: format!("test-{}", i),
some_string: "Test".to_string(),
one_more_string: "Test2".to_string(),
some_num: i,
created_at: Utc::now(),
};
// Let's insert some data
db.fluent()
.update()
.in_col(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.add_to_batch(&mut current_batch)?;
}
current_batch.write().await?;
}
println!("Getting by id only from cache");
let my_struct0: Option<MyTestStructure> = db
.read_cached_only(&cache)
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.one("test-1")
.await?;
println!("{:?}", my_struct0);
println!("Getting by id");
let my_struct1: Option<MyTestStructure> = db
.read_through_cache(&cache)
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.one("test-1")
.await?;
println!("{:?}", my_struct1);
println!("Getting by id from cache now");
let my_struct2: Option<MyTestStructure> = db
.read_through_cache(&cache)
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.one("test-1")
.await?;
println!("{:?}", my_struct2);
println!("Getting batch by ids");
let cached_db = db.read_through_cache(&cache);
let my_struct1_stream: BoxStream<FirestoreResult<(String, Option<MyTestStructure>)>> =
cached_db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.batch_with_errors(["test-1", "test-2"])
.await?;
let my_structs1 = my_struct1_stream.try_collect::<Vec<_>>().await?;
println!("{:?}", my_structs1);
// Now from cache
let my_struct2_stream: BoxStream<FirestoreResult<(String, Option<MyTestStructure>)>> =
cached_db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.batch_with_errors(["test-1", "test-2"])
.await?;
let my_structs2 = my_struct2_stream.try_collect::<Vec<_>>().await?;
println!("{:?}", my_structs2);
// List from cache
let cached_db = db.read_cached_only(&cache);
let all_items_stream = cached_db
.fluent()
.list()
.from(TEST_COLLECTION_NAME)
.obj::<MyTestStructure>()
.stream_all_with_errors()
.await?;
let listed_items = all_items_stream.try_collect::<Vec<_>>().await?;
println!("{:?}", listed_items.len());
// Query from cache
let all_items_stream = cached_db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.filter(|q| {
q.for_all(
q.field(path!(MyTestStructure::some_num))
.greater_than_or_equal(250),
)
})
.order_by([(
path!(MyTestStructure::some_num),
FirestoreQueryDirection::Ascending,
)])
.obj::<MyTestStructure>()
.stream_query_with_errors()
.await?;
let queried_items = all_items_stream.try_collect::<Vec<_>>().await?;
println!(
"{:?} {:?}...",
queried_items.len(),
queried_items.iter().take(5).collect::<Vec<_>>()
);
cache.shutdown().await?;
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/update-precondition.rs | examples/update-precondition.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test";
let my_struct = MyTestStructure {
some_id: "test-1".to_string(),
some_string: "Test".to_string(),
one_more_string: "Test2".to_string(),
some_num: 41,
created_at: Utc::now(),
};
let object_updated: MyTestStructure = db
.fluent()
.update()
.fields(paths!(MyTestStructure::{some_num, one_more_string}))
.in_col(TEST_COLLECTION_NAME)
.precondition(FirestoreWritePrecondition::Exists(true))
.document_id(&my_struct.some_id)
.object(&MyTestStructure {
some_num: my_struct.some_num + 1,
one_more_string: "updated-value".to_string(),
..my_struct.clone()
})
.execute()
.await?;
println!("Updated {object_updated:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/token_auth.rs | examples/token_auth.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use std::ops::Add;
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
async fn my_token() -> gcloud_sdk::error::Result<gcloud_sdk::Token> {
Ok(gcloud_sdk::Token::new(
"Bearer".to_string(),
config_env_var("TOKEN_VALUE")
.expect("TOKEN_VALUE must be specified")
.into(),
chrono::Utc::now().add(std::time::Duration::from_secs(3600)),
))
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::with_options_token_source(
FirestoreDbOptions::new(config_env_var("PROJECT_ID")?),
gcloud_sdk::GCP_DEFAULT_SCOPES.clone(),
gcloud_sdk::TokenSourceType::ExternalSource(Box::new(
gcloud_sdk::ExternalJwtFunctionSource::new(my_token),
)),
)
.await?;
const TEST_COLLECTION_NAME: &str = "test-query";
// Query as a stream our data
let object_stream: BoxStream<FirestoreResult<MyTestStructure>> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.obj()
.stream_query_with_errors()
.await?;
let as_vec: Vec<MyTestStructure> = object_stream.try_collect().await?;
println!("{as_vec:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/batch-write-simple.rs | examples/batch-write-simple.rs | use chrono::{DateTime, Utc};
use firestore::*;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test-batch-write";
println!("Populating a test collection");
let batch_writer = db.create_simple_batch_writer().await?;
let mut current_batch = batch_writer.new_batch();
for idx in 0..500 {
let my_struct = MyTestStructure {
some_id: format!("test-{idx}"),
some_string: "Test".to_string(),
created_at: Utc::now(),
};
db.fluent()
.update()
.in_col(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.add_to_batch(&mut current_batch)?;
if idx % 100 == 0 {
let response = current_batch.write().await?;
current_batch = batch_writer.new_batch();
println!("{response:?}");
}
}
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/query-with-cursor.rs | examples/query-with-cursor.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use futures::StreamExt;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test";
println!("Populating a test collection");
for i in 0..10 {
let my_struct = MyTestStructure {
some_id: format!("test-{i}"),
some_string: "Test".to_string(),
one_more_string: "Test2".to_string(),
some_num: 42,
created_at: Utc::now(),
};
// Remove if it already exist
db.fluent()
.delete()
.from(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.execute()
.await?;
// Let's insert some data
db.fluent()
.insert()
.into(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.execute::<()>()
.await?;
}
println!("Querying a test collection in defined order");
// Querying as a stream with errors when needed
let object_stream: BoxStream<MyTestStructure> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.start_at(FirestoreQueryCursor::BeforeValue(vec!["test-5".into()]))
.order_by([(
path!(MyTestStructure::some_id),
FirestoreQueryDirection::Ascending,
)])
.obj()
.stream_query()
.await?;
let as_vec: Vec<MyTestStructure> = object_stream.collect().await;
println!("{as_vec:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/document-transform.rs | examples/document-transform.rs | use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_num: i32,
some_string: String,
some_array: Vec<i32>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test-transforms";
println!("Populating a test collection");
for i in 0..10 {
let my_struct = MyTestStructure {
some_id: format!("test-{i}"),
some_num: i,
some_string: "Test".to_string(),
some_array: vec![1, 2, 3],
};
// Remove if it already exist
db.fluent()
.delete()
.from(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.execute()
.await?;
// Let's insert some data
db.fluent()
.insert()
.into(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.execute::<()>()
.await?;
}
println!("Transaction with transformations");
let mut transaction = db.begin_transaction().await?;
// Only transforms
db.fluent()
.update()
.in_col(TEST_COLLECTION_NAME)
.document_id("test-4")
.transforms(|t| {
t.fields([
t.field(path!(MyTestStructure::some_num)).increment(10),
t.field(path!(MyTestStructure::some_array))
.append_missing_elements([4, 5]),
t.field(path!(MyTestStructure::some_array))
.remove_all_from_array([3]),
])
})
.only_transform()
.add_to_transaction(&mut transaction)?;
// Transforms with update
db.fluent()
.update()
.fields(paths!(MyTestStructure::{
some_string
}))
.in_col(TEST_COLLECTION_NAME)
.document_id("test-5")
.object(&MyTestStructure {
some_id: "test-5".to_string(),
some_num: 0,
some_string: "UpdatedTest".to_string(),
some_array: vec![1, 2, 3],
})
.transforms(|t| {
t.fields([
t.field(path!(MyTestStructure::some_num)).increment(10),
t.field(path!(MyTestStructure::some_array))
.append_missing_elements([4, 5]),
t.field(path!(MyTestStructure::some_array))
.remove_all_from_array([3]),
])
})
.add_to_transaction(&mut transaction)?;
transaction.commit().await?;
println!("Listing objects");
// Query as a stream our data
let mut objs_stream: BoxStream<MyTestStructure> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.order_by([(
path!(MyTestStructure::some_id),
FirestoreQueryDirection::Descending,
)])
.obj()
.stream_query()
.await?;
while let Some(object) = objs_stream.next().await {
println!("Object in stream: {object:?}");
}
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/aggregated-query.rs | examples/aggregated-query.rs | use firestore::*;
use serde::{Deserialize, Serialize};
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyAggTestStructure {
counter: usize,
calc_sum: Option<usize>,
calc_avg: Option<f64>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test";
println!("Aggregated query a test collection as a stream");
let objs: Vec<MyAggTestStructure> = db
.fluent()
.select()
.from(TEST_COLLECTION_NAME)
.aggregate(|a| {
a.fields([
a.field(path!(MyAggTestStructure::counter)).count(),
a.field(path!(MyAggTestStructure::calc_sum)).sum("some_num"),
a.field(path!(MyAggTestStructure::calc_avg)).avg("some_num"),
])
})
.obj()
.query()
.await?;
println!("{objs:?}");
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/examples/batch-get-stream.rs | examples/batch-get-stream.rs | use chrono::{DateTime, Utc};
use firestore::*;
use futures::stream::BoxStream;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
pub fn config_env_var(name: &str) -> Result<String, String> {
std::env::var(name).map_err(|e| format!("{name}: {e}"))
}
// Example structure to play with
#[derive(Debug, Clone, Deserialize, Serialize)]
struct MyTestStructure {
some_id: String,
some_string: String,
one_more_string: String,
some_num: u64,
created_at: DateTime<Utc>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Logging with debug enabled
let subscriber = tracing_subscriber::fmt()
.with_env_filter("firestore=debug")
.finish();
tracing::subscriber::set_global_default(subscriber)?;
// Create an instance
let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
const TEST_COLLECTION_NAME: &str = "test";
println!("Populating a test collection");
for i in 0..10 {
let my_struct = MyTestStructure {
some_id: format!("test-{i}"),
some_string: "Test".to_string(),
one_more_string: "Test2".to_string(),
some_num: 42,
created_at: Utc::now(),
};
// Remove if it already exist
db.fluent()
.delete()
.from(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.execute()
.await?;
// Let's insert some data
db.fluent()
.insert()
.into(TEST_COLLECTION_NAME)
.document_id(&my_struct.some_id)
.object(&my_struct)
.execute::<()>()
.await?;
}
println!("Getting objects by IDs as a stream");
// Query as a stream our data
let mut object_stream: BoxStream<(String, Option<MyTestStructure>)> = db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.batch(vec!["test-0", "test-5"])
.await?;
while let Some(object) = object_stream.next().await {
println!("Object in stream: {object:?}");
}
// Getting as a stream with errors when needed
let mut object_stream_with_errors: BoxStream<
FirestoreResult<(String, Option<MyTestStructure>)>,
> = db
.fluent()
.select()
.by_id_in(TEST_COLLECTION_NAME)
.obj()
.batch_with_errors(vec!["test-0", "test-5"])
.await?;
while let Some(object) = object_stream_with_errors.try_next().await? {
println!("Object in stream: {object:?}");
}
Ok(())
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/with_const_generics.rs | src/with_const_generics.rs | use crate::ringbuffer_trait::{RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator};
use crate::{impl_ring_buffer_set_len, RingBuffer, SetLen};
use core::iter::FromIterator;
use core::mem::MaybeUninit;
use core::mem::{self, ManuallyDrop};
use core::ops::{Index, IndexMut};
/// The `ConstGenericRingBuffer` struct is a `RingBuffer` implementation which does not require `alloc` but
/// uses const generics instead.
///
/// [`ConstGenericRingBuffer`] allocates the ringbuffer on the stack, and the size must be known at
/// compile time through const-generics.
///
/// # Example
/// ```
/// use ringbuffer::{ConstGenericRingBuffer, RingBuffer};
///
/// let mut buffer = ConstGenericRingBuffer::<_, 2>::new();
///
/// // First entry of the buffer is now 5.
/// buffer.enqueue(5);
///
/// // The last item we enqueued is 5
/// assert_eq!(buffer.back(), Some(&5));
///
/// // Second entry is now 42.
/// buffer.enqueue(42);
///
/// assert_eq!(buffer.peek(), Some(&5));
/// assert!(buffer.is_full());
///
/// // Because capacity is reached the next enqueue will be the first item of the buffer.
/// buffer.enqueue(1);
/// assert_eq!(buffer.to_vec(), vec![42, 1]);
/// ```
#[derive(Debug)]
pub struct ConstGenericRingBuffer<T, const CAP: usize> {
pub(crate) buf: [MaybeUninit<T>; CAP],
readptr: usize,
writeptr: usize,
}
impl<T, const CAP: usize> From<[T; CAP]> for ConstGenericRingBuffer<T, CAP> {
fn from(value: [T; CAP]) -> Self {
let v = ManuallyDrop::new(value);
Self {
// Safety:
// T has the same layout as MaybeUninit<T>
// [T; N] has the same layout as [MaybeUninit<T>; N]
// Without ManuallyDrop this would be unsound as
// transmute_copy doesn't take ownership
buf: unsafe { mem::transmute_copy(&v) },
readptr: 0,
writeptr: CAP,
}
}
}
impl<T: Clone, const CAP: usize> From<&[T; CAP]> for ConstGenericRingBuffer<T, CAP> {
fn from(value: &[T; CAP]) -> Self {
Self::from(value.clone())
}
}
impl<T: Clone, const CAP: usize> From<&[T]> for ConstGenericRingBuffer<T, CAP> {
fn from(value: &[T]) -> Self {
value.iter().cloned().collect()
}
}
impl<T: Clone, const CAP: usize> From<&mut [T; CAP]> for ConstGenericRingBuffer<T, CAP> {
fn from(value: &mut [T; CAP]) -> Self {
Self::from(value.clone())
}
}
impl<T: Clone, const CAP: usize> From<&mut [T]> for ConstGenericRingBuffer<T, CAP> {
fn from(value: &mut [T]) -> Self {
value.iter().cloned().collect()
}
}
#[cfg(feature = "alloc")]
impl<T, const CAP: usize> From<alloc::vec::Vec<T>> for ConstGenericRingBuffer<T, CAP> {
fn from(value: alloc::vec::Vec<T>) -> Self {
value.into_iter().collect()
}
}
#[cfg(feature = "alloc")]
impl<T, const CAP: usize> From<alloc::collections::VecDeque<T>> for ConstGenericRingBuffer<T, CAP> {
fn from(value: alloc::collections::VecDeque<T>) -> Self {
value.into_iter().collect()
}
}
#[cfg(feature = "alloc")]
impl<T, const CAP: usize> From<alloc::collections::LinkedList<T>>
for ConstGenericRingBuffer<T, CAP>
{
fn from(value: alloc::collections::LinkedList<T>) -> Self {
value.into_iter().collect()
}
}
#[cfg(feature = "alloc")]
impl<const CAP: usize> From<alloc::string::String> for ConstGenericRingBuffer<char, CAP> {
fn from(value: alloc::string::String) -> Self {
value.chars().collect()
}
}
impl<const CAP: usize> From<&str> for ConstGenericRingBuffer<char, CAP> {
fn from(value: &str) -> Self {
value.chars().collect()
}
}
#[cfg(feature = "alloc")]
impl<T, const CAP: usize> From<crate::GrowableAllocRingBuffer<T>>
for ConstGenericRingBuffer<T, CAP>
{
fn from(mut value: crate::GrowableAllocRingBuffer<T>) -> Self {
value.drain().collect()
}
}
#[cfg(feature = "alloc")]
impl<T, const CAP: usize> From<crate::AllocRingBuffer<T>> for ConstGenericRingBuffer<T, CAP> {
fn from(mut value: crate::AllocRingBuffer<T>) -> Self {
value.drain().collect()
}
}
impl<T, const CAP: usize> Drop for ConstGenericRingBuffer<T, CAP> {
fn drop(&mut self) {
self.drain().for_each(drop);
}
}
impl<T: Clone, const CAP: usize> Clone for ConstGenericRingBuffer<T, CAP> {
fn clone(&self) -> Self {
let mut new = ConstGenericRingBuffer::<T, CAP>::new();
new.extend(self.iter().cloned());
new
}
}
// We need to manually implement PartialEq because MaybeUninit isn't PartialEq
impl<T: PartialEq, const CAP: usize> PartialEq for ConstGenericRingBuffer<T, CAP> {
fn eq(&self, other: &Self) -> bool {
if self.len() == other.len() {
for (a, b) in self.iter().zip(other.iter()) {
if a != b {
return false;
}
}
true
} else {
false
}
}
}
impl<T: PartialEq, const CAP: usize> Eq for ConstGenericRingBuffer<T, CAP> {}
impl<T, const CAP: usize> ConstGenericRingBuffer<T, CAP> {
const ERROR_CAPACITY_IS_NOT_ALLOWED_TO_BE_ZERO: () =
assert!(CAP != 0, "Capacity is not allowed to be zero");
/// Creates a const generic ringbuffer, size is passed as a const generic.
///
/// Note that the size does not have to be a power of two, but that not using a power
/// of two might be significantly (up to 3 times) slower.
#[inline]
#[must_use]
pub const fn new<const N: usize>() -> Self
where
ConstGenericRingBuffer<T, CAP>: From<ConstGenericRingBuffer<T, N>>,
{
#[allow(clippy::let_unit_value)]
let () = Self::ERROR_CAPACITY_IS_NOT_ALLOWED_TO_BE_ZERO;
Self {
buf: [const { MaybeUninit::<T>::uninit() }; CAP],
writeptr: 0,
readptr: 0,
}
}
}
/// Get a const pointer to the buffer.
///
/// # Safety
/// `rb` must point to a valid `ConstGenericRingBuffer` for the duration of the call.
unsafe fn get_base_ptr<T, const N: usize>(rb: *const ConstGenericRingBuffer<T, N>) -> *const T {
    // Casts `*const MaybeUninit<T>` (array base) to `*const T`;
    // `MaybeUninit<T>` is guaranteed to have the same layout as `T`.
    (*rb).buf.as_ptr().cast()
}
/// Get a mut pointer to the buffer.
///
/// # Safety
/// `rb` must point to a valid `ConstGenericRingBuffer` for the duration of the call.
unsafe fn get_base_mut_ptr<T, const N: usize>(rb: *mut ConstGenericRingBuffer<T, N>) -> *mut T {
    // Casts `*mut MaybeUninit<T>` (array base) to `*mut T`; same layout guarantee.
    (*rb).buf.as_mut_ptr().cast()
}
/// Get a reference from the buffer without checking it is initialized.
///
/// # Safety
/// Caller MUST be sure this index is initialized, or undefined behavior will happen.
/// Note that `index` is a *raw* (already masked) slot index; the slice indexing
/// still bounds-checks it against the array length.
unsafe fn get_unchecked<'a, T, const N: usize>(
    rb: *const ConstGenericRingBuffer<T, N>,
    index: usize,
) -> &'a T {
    // `as_ptr` on an in-array `MaybeUninit` element can never yield null, so
    // the `expect` is only a belt-and-braces guard around the `as_ref` cast.
    (*rb).buf[index]
        .as_ptr()
        .as_ref()
        .expect("const array ptr shouldn't be null!")
}
/// Get a mutable reference from the buffer without checking it is initialized.
///
/// # Safety
/// Caller MUST be sure this index is initialized, or undefined behavior will happen.
unsafe fn get_unchecked_mut<T, const N: usize>(
    rb: *mut ConstGenericRingBuffer<T, N>,
    index: usize,
) -> *mut T {
    // `as_mut` yields `Option<&mut T>`; the reference then coerces to the
    // declared `*mut T` return type. The `expect` only guards against null,
    // which an in-array element pointer can never be.
    (*rb).buf[index]
        .as_mut_ptr()
        .as_mut()
        .expect("const array ptr shouldn't be null!")
}
impl<T, const CAP: usize> IntoIterator for ConstGenericRingBuffer<T, CAP> {
    type Item = T;
    type IntoIter = RingBufferIntoIterator<T, Self>;

    /// Consumes the buffer, yielding owned elements via the shared
    /// ringbuffer into-iterator adapter.
    fn into_iter(self) -> Self::IntoIter {
        RingBufferIntoIterator::new(self)
    }
}
#[allow(clippy::into_iter_without_iter)]
// iter() is implemented on the trait, so `&buf` simply delegates to it.
impl<'a, T, const CAP: usize> IntoIterator for &'a ConstGenericRingBuffer<T, CAP> {
    type Item = &'a T;
    type IntoIter = RingBufferIterator<'a, T, ConstGenericRingBuffer<T, CAP>>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
#[allow(clippy::into_iter_without_iter)]
// iter_mut() is implemented on the trait, so `&mut buf` simply delegates to it.
impl<'a, T, const CAP: usize> IntoIterator for &'a mut ConstGenericRingBuffer<T, CAP> {
    type Item = &'a mut T;
    type IntoIter = RingBufferMutIterator<'a, T, ConstGenericRingBuffer<T, CAP>>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}
impl<T, const CAP: usize> Extend<T> for ConstGenericRingBuffer<T, CAP> {
    /// Enqueues every item from `iter`; once the buffer is full, each further
    /// item overwrites the oldest element.
    fn extend<A: IntoIterator<Item = T>>(&mut self, iter: A) {
        iter.into_iter().for_each(|item| {
            // `enqueue` hands back the evicted element when full; discard it.
            let _ = self.enqueue(item);
        });
    }
}
unsafe impl<T, const CAP: usize> RingBuffer<T> for ConstGenericRingBuffer<T, CAP> {
    #[inline]
    unsafe fn ptr_capacity(_: *const Self) -> usize {
        // Capacity is a compile-time constant; no need to read through the pointer.
        CAP
    }

    #[inline]
    unsafe fn ptr_buffer_size(_: *const Self) -> usize {
        // Backing array size equals the capacity for this implementation.
        CAP
    }

    impl_ringbuffer!(readptr, writeptr);

    /// Pushes `value`; when full, evicts and returns the oldest element.
    #[inline]
    fn enqueue(&mut self, value: T) -> Option<T> {
        let mut ret = None;
        if self.is_full() {
            // Take ownership of the oldest slot, leaving an uninit hole that
            // the write below (same masked position once readptr advances) refills.
            let previous_value = mem::replace(
                &mut self.buf[crate::mask_modulo(CAP, self.readptr)],
                MaybeUninit::uninit(),
            );
            // make sure we drop whatever is being overwritten
            // SAFETY: the buffer is full, so this must be initialized
            //       : also, index has been masked
            ret = Some(unsafe { previous_value.assume_init() });
            self.readptr += 1;
        }
        let index = crate::mask_modulo(CAP, self.writeptr);
        self.buf[index] = MaybeUninit::new(value);
        self.writeptr += 1;
        ret
    }

    /// Removes and returns the oldest element, or `None` when empty.
    fn dequeue(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            let index = crate::mask_modulo(CAP, self.readptr);
            // Swap the slot for an uninit placeholder so ownership moves out.
            let res = mem::replace(&mut self.buf[index], MaybeUninit::uninit());
            self.readptr += 1;
            // Safety: the fact that we got this maybeuninit from the buffer (with mask) means that
            // it's initialized. If it wasn't the is_empty call would have caught it. Values
            // are always initialized when inserted so this is safe.
            unsafe { Some(res.assume_init()) }
        }
    }

    impl_ringbuffer_ext!(
        get_base_ptr,
        get_base_mut_ptr,
        get_unchecked,
        get_unchecked_mut,
        readptr,
        writeptr,
        crate::mask_modulo
    );

    /// Drops all current elements and refills the whole buffer with values
    /// produced by `f`, leaving it full.
    #[inline]
    fn fill_with<F: FnMut() -> T>(&mut self, mut f: F) {
        // Drop any existing elements before overwriting their slots.
        self.clear();
        // Canonical "full" state: read at 0, write at CAP.
        self.readptr = 0;
        self.writeptr = CAP;
        self.buf.fill_with(|| MaybeUninit::new(f()));
    }
}
impl<T, const CAP: usize> Default for ConstGenericRingBuffer<T, CAP> {
    /// Creates a buffer with a capacity specified through the Cap type parameter.
    /// # Panics
    /// Panics if `CAP` is 0
    #[inline]
    fn default() -> Self {
        // Delegates to `new`; its `N` const parameter is presumably inferred
        // as `CAP` via the reflexive `From<Self>` bound — TODO confirm.
        Self::new()
    }
}
impl<RB, const CAP: usize> FromIterator<RB> for ConstGenericRingBuffer<RB, CAP> {
    /// Collects the iterator into the buffer; once full, each further item
    /// overwrites the oldest one, so only the last `CAP` items are kept.
    fn from_iter<T: IntoIterator<Item = RB>>(iter: T) -> Self {
        let mut collected = Self::default();
        iter.into_iter().for_each(|item| {
            // Evicted elements (when overflowing) are simply dropped.
            let _ = collected.enqueue(item);
        });
        collected
    }
}
impl<T, const CAP: usize> Index<usize> for ConstGenericRingBuffer<T, CAP> {
    type Output = T;

    /// Indexes logically from the oldest element (`buf[0]` is the oldest).
    ///
    /// # Panics
    /// Panics with "index out of bounds" when `get(index)` returns `None`.
    // NOTE(review): the crate's tests indicate `get` wraps indices modulo the
    // length, so in practice this only panics on an empty buffer — confirm.
    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).expect("index out of bounds")
    }
}
impl<T, const CAP: usize> IndexMut<usize> for ConstGenericRingBuffer<T, CAP> {
    /// Mutable counterpart of `Index`; same logical (oldest-first) indexing.
    ///
    /// # Panics
    /// Panics with "index out of bounds" when `get_mut(index)` returns `None`.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).expect("index out of bounds")
    }
}
impl<T, const CAP: usize> SetLen for ConstGenericRingBuffer<T, CAP> {
    // Generates `unsafe fn set_len` in terms of this type's read/write pointers
    // (sets writeptr = readptr + new_len; see the macro's definition).
    impl_ring_buffer_set_len!(readptr, writeptr);
}
#[cfg(test)]
mod tests {
    use crate::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer, RingBuffer};
    use alloc::collections::{LinkedList, VecDeque};
    use alloc::string::ToString;
    use alloc::vec;

    /// Exercises a non-power-of-two capacity (10) through many wrap cycles,
    /// since the modulo-based masking is the slow/less-common path.
    #[test]
    fn test_not_power_of_two() {
        let mut rb = ConstGenericRingBuffer::<usize, 10>::new();
        const NUM_VALS: usize = 1000;
        // recycle the ringbuffer a bunch of time to see if none of the logic
        // messes up
        for _ in 0..100 {
            for i in 0..NUM_VALS {
                let _ = rb.enqueue(i);
            }
            assert!(rb.is_full());
            for i in 0..10 {
                // After overwriting, only the newest `capacity()` values remain.
                assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue());
            }
            assert!(rb.is_empty());
        }
    }

    /// Indexing an empty buffer must panic ("index out of bounds").
    #[test]
    #[should_panic]
    fn test_index_zero_length() {
        let b = ConstGenericRingBuffer::<i32, 2>::new();
        let _ = b[2];
    }

    /// Extending a full buffer overwrites the oldest elements.
    #[test]
    fn test_extend() {
        let mut buf = ConstGenericRingBuffer::<u8, 4>::new();
        (0..4).for_each(|_| {
            let _ = buf.enqueue(0);
        });
        let new_data = [0, 1, 2];
        buf.extend(new_data);
        let expected = [0, 0, 1, 2];
        for i in 0..4 {
            let actual = buf[i];
            let expected = expected[i];
            assert_eq!(actual, expected);
        }
    }

    /// Extending with more items than the capacity keeps only the newest ones.
    #[test]
    fn test_extend_with_overflow() {
        let mut buf = ConstGenericRingBuffer::<u8, 8>::new();
        (0..8).for_each(|_| {
            let _ = buf.enqueue(0);
        });
        let new_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
        buf.extend(new_data);
        let expected = [2, 3, 4, 5, 6, 7, 8, 9];
        for i in 0..8 {
            let actual = buf[i];
            let expected = expected[i];
            assert_eq!(actual, expected);
        }
    }

    /// Smoke-tests every `From` conversion: arrays, slices (shared and
    /// mutable), Vec, VecDeque, LinkedList, String, &str, and both
    /// alloc-backed ringbuffer types.
    #[test]
    fn from() {
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from([1, 2, 3]).to_vec(),
            vec![1, 2, 3]
        );
        let v: &[i32; 3] = &[1, 2, 3];
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(v).to_vec(),
            vec![1, 2, 3]
        );
        let v: &[i32] = &[1, 2, 3];
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(v).to_vec(),
            vec![1, 2, 3]
        );
        let v: &mut [i32; 3] = &mut [1, 2, 3];
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(v).to_vec(),
            vec![1, 2, 3]
        );
        let v: &mut [i32] = &mut [1, 2, 3];
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(v).to_vec(),
            vec![1, 2, 3]
        );
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(vec![1, 2, 3]).to_vec(),
            vec![1, 2, 3]
        );
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(
                vec![1, 2, 3].into_iter().collect::<VecDeque<_>>()
            )
            .to_vec(),
            vec![1, 2, 3]
        );
        assert_eq!(
            ConstGenericRingBuffer::<i32, 3>::from(
                vec![1, 2, 3].into_iter().collect::<LinkedList<_>>()
            )
            .to_vec(),
            vec![1, 2, 3]
        );
        assert_eq!(
            ConstGenericRingBuffer::<_, 3>::from("abc".to_string()).to_vec(),
            vec!['a', 'b', 'c']
        );
        assert_eq!(
            ConstGenericRingBuffer::<_, 3>::from("abc").to_vec(),
            vec!['a', 'b', 'c']
        );
        assert_eq!(
            ConstGenericRingBuffer::<_, 3>::from(GrowableAllocRingBuffer::from(vec![1, 2, 3]))
                .to_vec(),
            vec![1, 2, 3]
        );
        assert_eq!(
            ConstGenericRingBuffer::<_, 3>::from(AllocRingBuffer::from(vec![1, 2, 3])).to_vec(),
            vec![1, 2, 3]
        );
    }
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/set_len_trait.rs | src/set_len_trait.rs | /// `SetLen` is a trait defining the unsafe `set_len` method
/// on ringbuffers that support the operation.
pub trait SetLen {
    /// Force the length of the ringbuffer to `new_len`.
    ///
    /// Note that downsizing will not call Drop on elements at `new_len..old_len`,
    /// potentially causing a memory leak.
    ///
    /// Implementors typically generate this method via the
    /// [`impl_ring_buffer_set_len`] macro defined alongside this trait.
    ///
    /// # Panics
    /// Panics if `new_len` is greater than the ringbuffer capacity.
    ///
    /// # Safety
    /// - Safe when `new_len <= old_len`.
    /// - Safe when `new_len > old_len` and all the elements at `old_len..new_len` are already initialized.
    unsafe fn set_len(&mut self, new_len: usize);
}
/// Implement `set_len` given a `readptr` and a `writeptr`.
///
/// The generated method leaves the read position untouched and sets
/// `writeptr = readptr + new_len`, so the resulting logical length
/// (`writeptr - readptr`) is exactly `new_len`. It asserts that `new_len`
/// does not exceed `self.capacity()`.
#[macro_export]
macro_rules! impl_ring_buffer_set_len {
    ($readptr: ident, $writeptr: ident) => {
        #[inline]
        unsafe fn set_len(&mut self, new_len: usize) {
            let cap = self.capacity();
            // Fix: panic message previously read "Cannot set the a length of ...".
            assert!(
                new_len <= cap,
                "Cannot set a length of {new_len} on a ringbuffer with capacity for {cap} items"
            );
            self.$writeptr = self.$readptr + new_len;
        }
    };
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/lib.rs | src/lib.rs | #![no_std]
#![deny(missing_docs)]
#![deny(warnings)]
#![deny(unused_import_braces)]
#![deny(unused_results)]
#![deny(trivial_casts)]
#![deny(trivial_numeric_casts)]
#![deny(unused_qualifications)]
#![deny(clippy::must_use_candidate)]
#![deny(clippy::default_trait_access)]
#![deny(clippy::doc_markdown)]
#![deny(clippy::semicolon_if_nothing_returned)]
#![allow(unused_unsafe)] // to support older rust versions
#![doc = include_str!("../README.md")]
#[cfg(feature = "alloc")]
extern crate alloc;
#[macro_use]
pub(crate) mod ringbuffer_trait;
pub use ringbuffer_trait::RingBuffer;
mod set_len_trait;
pub use set_len_trait::SetLen;
#[cfg(feature = "alloc")]
mod with_alloc;
#[cfg(feature = "alloc")]
pub use with_alloc::alloc_ringbuffer::AllocRingBuffer;
#[cfg(feature = "alloc")]
pub use with_alloc::vecdeque::GrowableAllocRingBuffer;
mod with_const_generics;
pub use with_const_generics::ConstGenericRingBuffer;
/// Used internally. Wraps `index` into `0..cap` with a bitwise AND against
/// `cap - 1`, which equals `index % cap` when `cap` is a power of two.
/// (Returns the masked index, not the mask itself.)
#[inline]
#[cfg(feature = "alloc")]
const fn mask_and(cap: usize, index: usize) -> usize {
    // Only valid for power-of-two capacities; enforced in debug builds.
    debug_assert!(cap.is_power_of_two());
    index & (cap - 1)
}
/// Used internally. Wraps `index` into `0..cap` by taking the remainder,
/// which works for any non-zero capacity (not just powers of two).
#[inline]
const fn mask_modulo(cap: usize, index: usize) -> usize {
    // For unsigned integers `rem_euclid` is identical to `%`.
    index.rem_euclid(cap)
}
#[cfg(test)]
#[allow(non_upper_case_globals)]
mod tests {
extern crate std;
use core::fmt::Debug;
use std::vec;
use std::vec::Vec;
use crate::ringbuffer_trait::{RingBufferIterator, RingBufferMutIterator};
use crate::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer, RingBuffer};
#[test]
fn run_test_neg_index() {
//! Test for issue #43
const capacity: usize = 8;
fn test_neg_index(mut b: impl RingBuffer<usize>) {
for i in 0..capacity + 2 {
let _ = b.enqueue(i);
assert_eq!(b.get_signed(-1), Some(&i));
}
}
test_neg_index(AllocRingBuffer::new(capacity));
test_neg_index(ConstGenericRingBuffer::<usize, capacity>::new());
test_neg_index(GrowableAllocRingBuffer::with_capacity(capacity));
}
#[test]
fn run_test_default() {
fn test_default(b: impl RingBuffer<i32>) {
assert_eq!(b.capacity(), 8);
assert_eq!(b.len(), 0);
}
test_default(AllocRingBuffer::new(8));
test_default(GrowableAllocRingBuffer::with_capacity(8));
test_default(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_new() {
fn test_new(b: impl RingBuffer<i32>) {
assert_eq!(b.capacity(), 8);
assert_eq!(b.len(), 0);
}
test_new(AllocRingBuffer::new(8));
test_new(GrowableAllocRingBuffer::with_capacity(8));
test_new(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn test_default_eq_new() {
assert_eq!(
GrowableAllocRingBuffer::<i32>::default(),
GrowableAllocRingBuffer::<i32>::new()
);
assert_eq!(
ConstGenericRingBuffer::<i32, 8>::default(),
ConstGenericRingBuffer::<i32, 8>::new()
);
}
#[test]
fn run_test_len() {
fn test_len(mut b: impl RingBuffer<i32>) {
assert_eq!(0, b.len());
let _ = b.enqueue(1);
assert_eq!(1, b.len());
let _ = b.enqueue(2);
assert_eq!(2, b.len());
}
test_len(AllocRingBuffer::new(8));
test_len(GrowableAllocRingBuffer::with_capacity(8));
test_len(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_len_wrap() {
fn test_len_wrap(mut b: impl RingBuffer<i32>) {
assert_eq!(0, b.len());
let _ = b.enqueue(1);
assert_eq!(1, b.len());
let _ = b.enqueue(2);
assert_eq!(2, b.len());
// Now we are wrapping
let _ = b.enqueue(3);
assert_eq!(2, b.len());
let _ = b.enqueue(4);
assert_eq!(2, b.len());
}
test_len_wrap(AllocRingBuffer::new(2));
test_len_wrap(ConstGenericRingBuffer::<i32, 2>::new());
// the growable ringbuffer actually should grow instead of wrap
let mut grb = GrowableAllocRingBuffer::with_capacity(2);
assert_eq!(0, grb.len());
let _ = grb.enqueue(0);
assert_eq!(1, grb.len());
let _ = grb.enqueue(1);
assert_eq!(2, grb.len());
let _ = grb.enqueue(2);
assert_eq!(3, grb.len());
}
#[test]
fn run_test_clear() {
fn test_clear(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
b.clear();
assert!(b.is_empty());
assert_eq!(0, b.len());
}
test_clear(AllocRingBuffer::new(8));
test_clear(GrowableAllocRingBuffer::with_capacity(8));
test_clear(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_empty() {
fn test_empty(mut b: impl RingBuffer<i32>) {
assert!(b.is_empty());
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
assert!(!b.is_empty());
b.clear();
assert!(b.is_empty());
assert_eq!(0, b.len());
}
test_empty(AllocRingBuffer::new(8));
test_empty(GrowableAllocRingBuffer::with_capacity(8));
test_empty(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_iter() {
fn test_iter(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
let mut iter = b.iter();
assert_eq!(&1, iter.next().unwrap());
assert_eq!(&7, iter.next_back().unwrap());
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
assert_eq!(&6, iter.next_back().unwrap());
assert_eq!(&5, iter.next_back().unwrap());
assert_eq!(&4, iter.next().unwrap());
assert_eq!(None, iter.next());
}
test_iter(AllocRingBuffer::new(8));
test_iter(GrowableAllocRingBuffer::with_capacity(8));
test_iter(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_forward_iter_non_power_of_two() {
fn test_iter(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
let mut iter = b.iter();
assert_eq!(&1, iter.next().unwrap());
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
assert_eq!(&4, iter.next().unwrap());
assert_eq!(&5, iter.next().unwrap());
assert_eq!(&6, iter.next().unwrap());
assert_eq!(&7, iter.next().unwrap());
assert_eq!(None, iter.next());
}
test_iter(AllocRingBuffer::new(7));
test_iter(GrowableAllocRingBuffer::with_capacity(7));
test_iter(ConstGenericRingBuffer::<i32, 7>::new());
}
#[test]
fn run_test_iter_non_power_of_two() {
fn test_iter(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
let mut iter = b.iter();
assert_eq!(&1, iter.next().unwrap());
assert_eq!(&7, iter.next_back().unwrap());
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
assert_eq!(&6, iter.next_back().unwrap());
assert_eq!(&5, iter.next_back().unwrap());
assert_eq!(&4, iter.next().unwrap());
assert_eq!(None, iter.next());
}
test_iter(AllocRingBuffer::new(7));
test_iter(GrowableAllocRingBuffer::with_capacity(7));
test_iter(ConstGenericRingBuffer::<i32, 7>::new());
}
#[test]
fn run_test_iter_ref() {
fn test_iter<B>(mut b: B)
where
B: RingBuffer<i32>,
for<'a> &'a B: IntoIterator<Item = &'a i32, IntoIter = RingBufferIterator<'a, i32, B>>,
{
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
let mut iter = (&b).into_iter();
assert_eq!(&1, iter.next().unwrap());
assert_eq!(&7, iter.next_back().unwrap());
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
assert_eq!(&6, iter.next_back().unwrap());
assert_eq!(&5, iter.next_back().unwrap());
assert_eq!(&4, iter.next().unwrap());
assert_eq!(None, iter.next());
}
test_iter(AllocRingBuffer::new(8));
test_iter(GrowableAllocRingBuffer::with_capacity(8));
test_iter(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_into_iter() {
fn test_iter(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
let mut iter = b.into_iter();
assert_eq!(1, iter.next().unwrap());
assert_eq!(2, iter.next().unwrap());
assert_eq!(3, iter.next().unwrap());
assert_eq!(4, iter.next().unwrap());
assert_eq!(5, iter.next().unwrap());
assert_eq!(6, iter.next().unwrap());
assert_eq!(7, iter.next().unwrap());
assert_eq!(None, iter.next());
}
test_iter(AllocRingBuffer::new(8));
test_iter(GrowableAllocRingBuffer::with_capacity(8));
test_iter(ConstGenericRingBuffer::<i32, 8>::new());
}
#[cfg(feature = "alloc")]
#[test]
fn run_test_iter_with_lifetimes() {
fn test_iter<'a>(string: &'a str, mut b: impl RingBuffer<&'a str>) {
let _ = b.enqueue(&string[0..1]);
let _ = b.enqueue(&string[1..2]);
let _ = b.enqueue(&string[2..3]);
let mut iter = b.iter();
assert_eq!(&&string[0..1], iter.next().unwrap());
assert_eq!(&&string[1..2], iter.next().unwrap());
assert_eq!(&&string[2..3], iter.next().unwrap());
}
extern crate alloc;
use alloc::string::ToString as _;
let string = "abc".to_string();
test_iter(&string, AllocRingBuffer::new(8));
test_iter(&string, GrowableAllocRingBuffer::with_capacity(8));
test_iter(&string, ConstGenericRingBuffer::<&str, 8>::new());
}
#[test]
fn run_test_double_iter() {
fn test_double_iter(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let mut iter1 = b.iter();
let mut iter2 = b.iter();
assert_eq!(&1, iter1.next().unwrap());
assert_eq!(&2, iter1.next().unwrap());
assert_eq!(&3, iter1.next().unwrap());
assert_eq!(&1, iter2.next().unwrap());
assert_eq!(&2, iter2.next().unwrap());
assert_eq!(&3, iter2.next().unwrap());
}
test_double_iter(AllocRingBuffer::new(8));
test_double_iter(GrowableAllocRingBuffer::with_capacity(8));
test_double_iter(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_iter_wrap() {
fn test_iter_wrap(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
// Wrap
let _ = b.enqueue(3);
let mut iter = b.iter();
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
}
test_iter_wrap(AllocRingBuffer::new(2));
test_iter_wrap(ConstGenericRingBuffer::<i32, 2>::new());
// the growable ringbuffer shouldn't actually stop growing
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
// No wrap
let _ = b.enqueue(3);
let mut iter = b.iter();
assert_eq!(&1, iter.next().unwrap());
assert_eq!(&2, iter.next().unwrap());
assert_eq!(&3, iter.next().unwrap());
assert!(iter.next().is_none());
}
#[test]
fn run_test_iter_mut() {
fn test_iter_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
for el in b.iter_mut() {
*el += 1;
}
assert_eq!(vec![2, 3, 4], b.to_vec());
}
test_iter_mut(AllocRingBuffer::new(8));
test_iter_mut(GrowableAllocRingBuffer::with_capacity(8));
test_iter_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_iter_mut_ref() {
fn test_iter_mut<B>(mut b: B)
where
B: RingBuffer<i32>,
for<'a> &'a mut B:
IntoIterator<Item = &'a mut i32, IntoIter = RingBufferMutIterator<'a, i32, B>>,
{
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
for el in &mut b {
*el += 1;
}
assert_eq!(vec![2, 3, 4], b.to_vec());
}
test_iter_mut(AllocRingBuffer::new(8));
test_iter_mut(GrowableAllocRingBuffer::with_capacity(8));
test_iter_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn test_iter_mut_wrap() {
fn run_test_iter_mut_wrap(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
for i in b.iter_mut() {
*i += 1;
}
assert_eq!(vec![3, 4], b.to_vec());
}
run_test_iter_mut_wrap(AllocRingBuffer::new(2));
run_test_iter_mut_wrap(ConstGenericRingBuffer::<i32, 2>::new());
// The growable ringbuffer actually shouldn't wrap
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
for i in b.iter_mut() {
*i += 1;
}
assert_eq!(vec![2, 3, 4], b.to_vec());
}
#[test]
fn test_iter_mut_miri_fail() {
fn run_test_iter_mut_wrap(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let buf = b.iter_mut().collect::<Vec<_>>();
for i in buf {
*i += 1;
}
assert_eq!(vec![3, 4], b.to_vec());
}
run_test_iter_mut_wrap(AllocRingBuffer::new(2));
run_test_iter_mut_wrap(ConstGenericRingBuffer::<i32, 2>::new());
// the growable ringbuffer actually shouldn't wrap
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let buf = b.iter_mut().collect::<Vec<_>>();
for i in buf {
*i += 1;
}
assert_eq!(vec![2, 3, 4], b.to_vec());
}
#[test]
fn run_test_to_vec() {
fn test_to_vec(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
assert_eq!(vec![1, 2, 3], b.to_vec());
}
test_to_vec(AllocRingBuffer::new(8));
test_to_vec(GrowableAllocRingBuffer::with_capacity(8));
test_to_vec(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_to_vec_wrap() {
fn test_to_vec_wrap(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
// Wrap
let _ = b.enqueue(3);
assert_eq!(vec![2, 3], b.to_vec());
}
test_to_vec_wrap(AllocRingBuffer::new(2));
test_to_vec_wrap(ConstGenericRingBuffer::<i32, 2>::new());
// The growable ringbuffer should actually remember all items
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
assert_eq!(vec![1, 2, 3], b.to_vec());
}
#[test]
fn run_test_index() {
fn test_index(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(2);
assert_eq!(b[0], 2);
}
test_index(AllocRingBuffer::new(8));
test_index(GrowableAllocRingBuffer::with_capacity(8));
test_index(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_get() {
fn test_index(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
let _ = b.enqueue(3);
let _ = b.enqueue(4);
let _ = b.enqueue(5);
let _ = b.enqueue(6);
let _ = b.enqueue(7);
assert_eq!(b.get(0), Some(&0));
assert_eq!(b.get(1), Some(&1));
assert_eq!(b.get(2), Some(&2));
assert_eq!(b.get(3), Some(&3));
assert_eq!(b.get(4), Some(&4));
assert_eq!(b.get(5), Some(&5));
assert_eq!(b.get(6), Some(&6));
assert_eq!(b.get(7), Some(&7));
}
test_index(AllocRingBuffer::new(8));
test_index(GrowableAllocRingBuffer::with_capacity(8));
test_index(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_index_mut() {
fn test_index_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(2);
assert_eq!(b[0], 2);
b[0] = 5;
assert_eq!(b[0], 5);
}
test_index_mut(AllocRingBuffer::new(8));
test_index_mut(GrowableAllocRingBuffer::with_capacity(8));
test_index_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_peek_some() {
fn test_peek_some(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.peek(), Some(&1));
}
test_peek_some(AllocRingBuffer::new(2));
test_peek_some(GrowableAllocRingBuffer::with_capacity(2));
test_peek_some(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_peek_none() {
fn test_peek_none(b: impl RingBuffer<i32>) {
assert_eq!(b.peek(), None);
}
test_peek_none(AllocRingBuffer::new(8));
test_peek_none(GrowableAllocRingBuffer::with_capacity(8));
test_peek_none(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_get_relative() {
fn test_get_relative(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
// get[(index + 1) % len] = 1
assert_eq!(b.get(0).unwrap(), &0);
assert_eq!(b.get(1).unwrap(), &1);
// Wraps around
assert_eq!(b.get(2).unwrap(), &0);
assert_eq!(b.get(3).unwrap(), &1);
}
test_get_relative(AllocRingBuffer::new(8));
test_get_relative(GrowableAllocRingBuffer::with_capacity(8));
test_get_relative(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_wrapping_get_relative() {
fn test_wrapping_get_relative(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
// [0, ...]
// ^
// [0, 1]
// ^
// [2, 1]
// ^
// get(0) == b[index] = 1
// get(1) == b[(index+1) % len] = 1
assert_eq!(b.get(0).unwrap(), &1);
assert_eq!(b.get(1).unwrap(), &2);
}
test_wrapping_get_relative(AllocRingBuffer::new(2));
test_wrapping_get_relative(ConstGenericRingBuffer::<i32, 2>::new());
// the growable ringbuffer actually shouldn't wrap
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(0);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.get(0).unwrap(), &0);
assert_eq!(b.get(1).unwrap(), &1);
assert_eq!(b.get(2).unwrap(), &2);
}
#[test]
fn run_test_get_relative_zero_length() {
fn test_get_relative_zero_length(b: impl RingBuffer<i32>) {
assert!(b.get(1).is_none());
}
test_get_relative_zero_length(AllocRingBuffer::new(8));
test_get_relative_zero_length(GrowableAllocRingBuffer::with_capacity(8));
test_get_relative_zero_length(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_get_relative_mut() {
fn test_get_relative_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
// [0, ...]
// ^
// [0, 1, ...]
// ^
// get[(index + 0) % len] = 0 (wrap to 0 because len == 2)
// get[(index + 1) % len] = 1
*b.get_mut(0).unwrap() = 3;
*b.get_mut(1).unwrap() = 4;
assert_eq!(b.get(0).unwrap(), &3);
assert_eq!(b.get(1).unwrap(), &4);
}
test_get_relative_mut(AllocRingBuffer::new(8));
test_get_relative_mut(GrowableAllocRingBuffer::with_capacity(8));
test_get_relative_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_wrapping_get_relative_mut() {
fn test_wrapping_get_relative_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
*b.get_mut(0).unwrap() = 3;
// [0, ...]
// ^
// [0, 1]
// ^
// [2, 1]
// ^
// get(0) == b[index] = 1
// get(1) == b[(index+1) % len] = 1
assert_eq!(b.get(0).unwrap(), &3);
assert_eq!(b.get(1).unwrap(), &2);
}
test_wrapping_get_relative_mut(AllocRingBuffer::new(2));
test_wrapping_get_relative_mut(ConstGenericRingBuffer::<i32, 2>::new());
// the growable ringbuffer actually shouldn't wrap
let mut b = GrowableAllocRingBuffer::with_capacity(2);
let _ = b.enqueue(0);
let _ = b.enqueue(1);
let _ = b.enqueue(2);
*b.get_mut(0).unwrap() = 3;
assert_eq!(b.get(0).unwrap(), &3);
assert_eq!(b.get(1).unwrap(), &1);
assert_eq!(b.get(2).unwrap(), &2);
}
#[test]
fn run_test_get_relative_mut_zero_length() {
fn test_get_relative_mut_zero_length(mut b: impl RingBuffer<i32>) {
assert!(b.get_mut(1).is_none());
}
test_get_relative_mut_zero_length(AllocRingBuffer::new(8));
test_get_relative_mut_zero_length(GrowableAllocRingBuffer::with_capacity(8));
test_get_relative_mut_zero_length(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_from_iterator() {
fn test_from_iterator<T: RingBuffer<i32> + FromIterator<i32>>() {
let b: T = std::iter::repeat(1).take(1024).collect();
assert_eq!(b.len(), 1024);
assert_eq!(b.to_vec(), vec![1; 1024]);
}
test_from_iterator::<GrowableAllocRingBuffer<i32>>();
test_from_iterator::<ConstGenericRingBuffer<i32, 1024>>();
}
#[test]
fn run_test_from_iterator_wrap() {
fn test_from_iterator_wrap<T: RingBuffer<i32> + FromIterator<i32>>() {
let b: T = std::iter::repeat(1).take(8000).collect();
assert_eq!(b.len(), b.capacity());
assert_eq!(b.to_vec(), vec![1; b.capacity()]);
}
test_from_iterator_wrap::<GrowableAllocRingBuffer<i32>>();
test_from_iterator_wrap::<ConstGenericRingBuffer<i32, 1024>>();
}
#[test]
fn run_test_get_relative_negative() {
fn test_get_relative_negative(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
// [0, ...]
// ^
// [0, 1, ...]
// ^
// get[(index + -1) % len] = 1
// get[(index + -2) % len] = 0 (wrap to 1 because len == 2)
assert_eq!(b.get_signed(-1).unwrap(), &1);
assert_eq!(b.get_signed(-2).unwrap(), &0);
// Wraps around
assert_eq!(b.get_signed(-3).unwrap(), &1);
assert_eq!(b.get_signed(-4).unwrap(), &0);
}
test_get_relative_negative(AllocRingBuffer::new(8));
test_get_relative_negative(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_contains() {
fn test_contains(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert!(b.contains(&1));
assert!(b.contains(&2));
}
test_contains(AllocRingBuffer::new(8));
test_contains(GrowableAllocRingBuffer::with_capacity(8));
test_contains(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_is_full() {
fn test_is_full(mut b: impl RingBuffer<i32>) {
assert!(!b.is_full());
let _ = b.enqueue(1);
assert!(!b.is_full());
let _ = b.enqueue(2);
assert!(b.is_full());
}
test_is_full(AllocRingBuffer::new(2));
test_is_full(GrowableAllocRingBuffer::with_capacity(2));
test_is_full(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_front_some() {
fn test_front_some(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.front(), Some(&1));
}
test_front_some(AllocRingBuffer::new(2));
test_front_some(GrowableAllocRingBuffer::with_capacity(2));
test_front_some(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_front_none() {
fn test_front_none(b: impl RingBuffer<i32>) {
assert_eq!(b.front(), None);
}
test_front_none(AllocRingBuffer::new(8));
test_front_none(GrowableAllocRingBuffer::with_capacity(8));
test_front_none(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_back_some() {
fn test_back_some(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.back(), Some(&2));
}
test_back_some(AllocRingBuffer::new(2));
test_back_some(GrowableAllocRingBuffer::with_capacity(2));
test_back_some(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_back_none() {
fn test_back_none(b: impl RingBuffer<i32>) {
assert_eq!(b.back(), None);
}
test_back_none(AllocRingBuffer::new(8));
test_back_none(GrowableAllocRingBuffer::with_capacity(8));
test_back_none(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_front_some_mut() {
fn test_front_some_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.front_mut(), Some(&mut 1));
}
test_front_some_mut(AllocRingBuffer::new(2));
test_front_some_mut(GrowableAllocRingBuffer::with_capacity(2));
test_front_some_mut(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_front_none_mut() {
fn test_front_none_mut(mut b: impl RingBuffer<i32>) {
assert_eq!(b.front_mut(), None);
}
test_front_none_mut(AllocRingBuffer::new(8));
test_front_none_mut(GrowableAllocRingBuffer::with_capacity(8));
test_front_none_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_back_some_mut() {
fn test_back_some_mut(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(1);
let _ = b.enqueue(2);
assert_eq!(b.back_mut(), Some(&mut 2));
}
test_back_some_mut(AllocRingBuffer::new(2));
test_back_some_mut(GrowableAllocRingBuffer::with_capacity(2));
test_back_some_mut(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
fn run_test_back_none_mut() {
fn test_back_none_mut(mut b: impl RingBuffer<i32>) {
assert_eq!(b.back_mut(), None);
}
test_back_none_mut(AllocRingBuffer::new(8));
test_back_none_mut(GrowableAllocRingBuffer::with_capacity(8));
test_back_none_mut(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_dequeue() {
fn run_test_dequeue(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
assert_eq!(b.len(), 2);
assert_eq!(b.dequeue(), Some(0));
assert_eq!(b.dequeue(), Some(1));
assert_eq!(b.len(), 0);
assert_eq!(b.dequeue(), None);
}
run_test_dequeue(AllocRingBuffer::new(8));
run_test_dequeue(GrowableAllocRingBuffer::with_capacity(8));
run_test_dequeue(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_skip() {
#[allow(deprecated)]
fn test_skip(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
assert_eq!(b.len(), 2);
b.skip();
b.skip();
assert_eq!(b.len(), 0);
}
test_skip(AllocRingBuffer::new(8));
test_skip(GrowableAllocRingBuffer::with_capacity(8));
test_skip(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_skip_2() {
#[allow(deprecated)]
fn test_skip2(mut rb: impl RingBuffer<i32>) {
rb.skip();
rb.skip();
rb.skip();
let _ = rb.enqueue(1);
assert_eq!(rb.dequeue(), Some(1));
assert_eq!(rb.dequeue(), None);
rb.skip();
assert_eq!(rb.dequeue(), None);
}
test_skip2(AllocRingBuffer::new(2));
test_skip2(GrowableAllocRingBuffer::with_capacity(2));
test_skip2(ConstGenericRingBuffer::<i32, 2>::new());
}
#[test]
#[allow(deprecated)]
fn run_test_push_pop() {
fn test_push_pop(mut b: impl RingBuffer<i32>) {
b.push(0);
b.push(1);
assert_eq!(b.dequeue(), Some(0));
assert_eq!(b.dequeue(), Some(1));
assert_eq!(b.dequeue(), None);
b.push(0);
b.push(1);
assert_eq!(b.dequeue(), Some(0));
assert_eq!(b.dequeue(), Some(1));
assert_eq!(b.dequeue(), None);
}
test_push_pop(AllocRingBuffer::new(8));
test_push_pop(GrowableAllocRingBuffer::with_capacity(8));
test_push_pop(ConstGenericRingBuffer::<i32, 8>::new());
}
#[test]
fn run_test_enqueue_dequeue_enqueue() {
fn test_enqueue_dequeue_enqueue(mut b: impl RingBuffer<i32>) {
let _ = b.enqueue(0);
let _ = b.enqueue(1);
assert_eq!(b.dequeue(), Some(0));
assert_eq!(b.dequeue(), Some(1));
assert_eq!(b.dequeue(), None);
let _ = b.enqueue(0);
let _ = b.enqueue(1);
assert_eq!(b.dequeue(), Some(0));
assert_eq!(b.dequeue(), Some(1));
assert_eq!(b.dequeue(), None);
}
test_enqueue_dequeue_enqueue(AllocRingBuffer::new(8));
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | true |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/ringbuffer_trait.rs | src/ringbuffer_trait.rs | use core::ops::{Index, IndexMut};
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// `RingBuffer` is a trait defining the standard interface for all `RingBuffer`
/// implementations ([`AllocRingBuffer`](crate::AllocRingBuffer), [`ConstGenericRingBuffer`](crate::ConstGenericRingBuffer))
///
/// This trait is not object safe, so can't be used dynamically. However it is possible to
/// define a generic function over types implementing `RingBuffer`.
///
/// # Safety
/// Implementing this implies that the ringbuffer upholds some safety
/// guarantees, such as returning a different value from `get_mut` any
/// for every different index passed in. See the exact requirements
/// in the safety comment on the next function of the mutable Iterator
/// implementation, since these safety guarantees are necessary for
/// [`iter_mut`](RingBuffer::iter_mut) to work
pub unsafe trait RingBuffer<T>:
Sized + IntoIterator<Item = T> + Extend<T> + Index<usize, Output = T> + IndexMut<usize>
{
/// Returns the length of the internal buffer.
/// This length grows up to the capacity and then stops growing.
/// This is because when the length is reached, new items are appended at the start.
fn len(&self) -> usize {
// Safety: self is a RingBuffer
unsafe { Self::ptr_len(self) }
}
/// Raw pointer version of len
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
#[doc(hidden)]
unsafe fn ptr_len(rb: *const Self) -> usize;
/// Returns true if the buffer is entirely empty.
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns true when the length of the ringbuffer equals the capacity. This happens whenever
/// more elements than capacity have been pushed to the buffer.
#[inline]
fn is_full(&self) -> bool {
self.len() == self.capacity()
}
/// Returns the capacity of the buffer.
fn capacity(&self) -> usize {
// Safety: self is a RingBuffer
unsafe { Self::ptr_capacity(self) }
}
/// Returns the number of elements allocated for this ringbuffer (can be larger than capacity).
fn buffer_size(&self) -> usize {
// Safety: self is a RingBuffer
unsafe { Self::ptr_buffer_size(self) }
}
/// Raw pointer version of capacity.
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
#[doc(hidden)]
unsafe fn ptr_capacity(rb: *const Self) -> usize;
/// Raw pointer version of `buffer_size`.
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
#[doc(hidden)]
unsafe fn ptr_buffer_size(rb: *const Self) -> usize;
/// Alias for [`enqueue`]
#[deprecated = "use enqueue instead"]
#[inline]
fn push(&mut self, value: T) {
let _ = self.enqueue(value);
}
/// Adds a value onto the buffer.
///
/// Cycles around if capacity is reached.
/// Forms a more natural counterpart to [`dequeue`](RingBuffer::dequeue).
/// An alias is provided with [`push`](RingBuffer::push).
fn enqueue(&mut self, value: T) -> Option<T>;
/// dequeues the top item off the ringbuffer, and moves this item out.
fn dequeue(&mut self) -> Option<T>;
/// dequeues the top item off the queue, but does not return it. Instead it is dropped.
/// If the ringbuffer is empty, this function is a nop.
#[inline]
#[deprecated = "use dequeue instead"]
fn skip(&mut self) {
let _ = self.dequeue();
}
/// Returns an iterator over the elements in the ringbuffer,
/// dequeueing elements as they are iterated over.
///
/// ```
/// use ringbuffer::{AllocRingBuffer, RingBuffer};
///
/// let mut rb = AllocRingBuffer::new(16);
/// for i in 0..8 {
/// rb.push(i);
/// }
///
/// assert_eq!(rb.len(), 8);
///
/// for i in rb.drain() {
/// // prints the numbers 0 through 8
/// println!("{}", i);
/// }
///
/// // No elements remain
/// assert_eq!(rb.len(), 0);
///
/// ```
fn drain(&mut self) -> RingBufferDrainingIterator<'_, T, Self> {
RingBufferDrainingIterator::new(self)
}
/// Sets every element in the ringbuffer to the value returned by f.
fn fill_with<F: FnMut() -> T>(&mut self, f: F);
/// Sets every element in the ringbuffer to it's default value
fn fill_default(&mut self)
where
T: Default,
{
self.fill_with(Default::default);
}
/// Sets every element in the ringbuffer to `value`
fn fill(&mut self, value: T)
where
T: Clone,
{
self.fill_with(|| value.clone());
}
/// Empties the buffer entirely. Sets the length to 0 but keeps the capacity allocated.
fn clear(&mut self);
/// Gets a value relative to the current index. 0 is the next index to be written to with push.
/// -1 and down are the last elements pushed and 0 and up are the items that were pushed the longest ago.
fn get_signed(&self, index: isize) -> Option<&T>;
/// Gets a value relative to the current index. 0 is the next index to be written to with push.
fn get(&self, index: usize) -> Option<&T>;
/// Gets a value relative to the current index mutably. 0 is the next index to be written to with push.
/// -1 and down are the last elements pushed and 0 and up are the items that were pushed the longest ago.
#[inline]
fn get_mut_signed(&mut self, index: isize) -> Option<&mut T> {
// Safety: self is a RingBuffer
unsafe { Self::ptr_get_mut_signed(self, index).map(|i| &mut *i) }
}
/// Gets a value relative to the current index mutably. 0 is the next index to be written to with push.
#[inline]
fn get_mut(&mut self, index: usize) -> Option<&mut T> {
// Safety: self is a RingBuffer
unsafe { Self::ptr_get_mut(self, index).map(|i| &mut *i) }
}
/// same as [`get_mut`](RingBuffer::get_mut) but on raw pointers.
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
#[doc(hidden)]
unsafe fn ptr_get_mut(rb: *mut Self, index: usize) -> Option<*mut T>;
/// same as [`get_mut`](RingBuffer::get_mut) but on raw pointers.
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
#[doc(hidden)]
unsafe fn ptr_get_mut_signed(rb: *mut Self, index: isize) -> Option<*mut T>;
/// Returns the value at the current index.
/// This is the value that will be overwritten by the next push and also the value pushed
/// the longest ago. (alias of [`Self::front`])
#[inline]
fn peek(&self) -> Option<&T> {
self.front()
}
/// Returns the value at the front of the queue.
/// This is the value that will be overwritten by the next push and also the value pushed
/// the longest ago.
/// (alias of peek)
#[inline]
fn front(&self) -> Option<&T> {
self.get(0)
}
/// Returns a mutable reference to the value at the back of the queue.
/// This is the value that will be overwritten by the next push.
/// (alias of peek)
#[inline]
fn front_mut(&mut self) -> Option<&mut T> {
self.get_mut(0)
}
/// Returns the value at the back of the queue.
/// This is the item that was pushed most recently.
#[inline]
fn back(&self) -> Option<&T> {
self.get_signed(-1)
}
/// Returns a mutable reference to the value at the back of the queue.
/// This is the item that was pushed most recently.
#[inline]
fn back_mut(&mut self) -> Option<&mut T> {
self.get_mut_signed(-1)
}
/// Creates a mutable iterator over the buffer starting from the item pushed the longest ago,
/// and ending at the element most recently pushed.
#[inline]
fn iter_mut(&mut self) -> RingBufferMutIterator<'_, T, Self> {
RingBufferMutIterator::new(self)
}
/// Creates an iterator over the buffer starting from the item pushed the longest ago,
/// and ending at the element most recently pushed.
#[inline]
fn iter(&self) -> RingBufferIterator<'_, T, Self> {
RingBufferIterator::new(self)
}
/// Converts the buffer to a vector. This Copies all elements in the ringbuffer.
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Vec<T>
where
T: Clone,
{
self.iter().cloned().collect()
}
/// Returns true if elem is in the ringbuffer.
fn contains(&self, elem: &T) -> bool
where
T: PartialEq,
{
self.iter().any(|i| i == elem)
}
/// Efficiently copy items from the ringbuffer to a target slice.
///
/// # Panics
/// Panics if the buffer length minus the offset is NOT equal to `target.len()`.
///
/// # Safety
/// ONLY SAFE WHEN self is a *const to to an implementor of `RingBuffer`
unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
where
T: Copy;
/// Efficiently copy items from the ringbuffer to a target slice.
///
/// # Panics
/// Panics if the buffer length minus the offset is NOT equal to `target.len()`.
fn copy_to_slice(&self, offset: usize, dst: &mut [T])
where
T: Copy,
{
unsafe { Self::ptr_copy_to_slice(self, offset, dst) }
}
/// Efficiently copy items from a slice to the ringbuffer.
/// # Panics
/// Panics if the buffer length minus the offset is NOT equal to `source.len()`.
///
/// # Safety
/// ONLY SAFE WHEN self is a *mut to to an implementor of `RingBuffer`
unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
where
T: Copy;
/// Efficiently copy items from a slice to the ringbuffer.
///
/// # Panics
/// Panics if the buffer length minus the offset is NOT equal to `source.len()`.
fn copy_from_slice(&mut self, offset: usize, src: &[T])
where
T: Copy,
{
unsafe { Self::ptr_copy_from_slice(self, offset, src) }
}
}
mod iter {
use crate::RingBuffer;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::ptr::NonNull;
/// `RingBufferIterator` holds a reference to a `RingBuffer` and iterates over it. `index` is the
/// current iterator position.
pub struct RingBufferIterator<'rb, T, RB: RingBuffer<T>> {
obj: &'rb RB,
len: usize,
index: usize,
phantom: PhantomData<T>,
}
impl<'rb, T, RB: RingBuffer<T>> RingBufferIterator<'rb, T, RB> {
#[inline]
pub fn new(obj: &'rb RB) -> Self {
Self {
obj,
len: obj.len(),
index: 0,
phantom: PhantomData,
}
}
}
impl<'rb, T: 'rb, RB: RingBuffer<T>> Iterator for RingBufferIterator<'rb, T, RB> {
type Item = &'rb T;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let res = self.obj.get(self.index);
self.index += 1;
res
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len, Some(self.len))
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.index = (self.index + n).min(self.len);
self.next()
}
}
impl<'rb, T: 'rb, RB: RingBuffer<T>> FusedIterator for RingBufferIterator<'rb, T, RB> {}
impl<'rb, T: 'rb, RB: RingBuffer<T>> ExactSizeIterator for RingBufferIterator<'rb, T, RB> {}
impl<'rb, T: 'rb, RB: RingBuffer<T>> DoubleEndedIterator for RingBufferIterator<'rb, T, RB> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if self.len > 0 && self.index < self.len {
let res = self.obj.get(self.len - 1);
self.len -= 1;
res
} else {
None
}
}
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.len = self.len - n.min(self.len);
self.next_back()
}
}
/// `RingBufferMutIterator` holds a reference to a `RingBuffer` and iterates over it. `index` is the
/// current iterator position.
///
/// WARNING: NEVER ACCESS THE `obj` FIELD OUTSIDE OF NEXT. It's private on purpose, and
/// can technically be accessed in the same module. However, this breaks the safety of `next()`
pub struct RingBufferMutIterator<'rb, T, RB: RingBuffer<T>> {
obj: NonNull<RB>,
index: usize,
len: usize,
phantom: PhantomData<&'rb mut T>,
}
impl<'rb, T, RB: RingBuffer<T>> RingBufferMutIterator<'rb, T, RB> {
pub fn new(obj: &'rb mut RB) -> Self {
Self {
len: obj.len(),
obj: NonNull::from(obj),
index: 0,
phantom: PhantomData,
}
}
}
impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> FusedIterator for RingBufferMutIterator<'rb, T, RB> {}
impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> ExactSizeIterator for RingBufferMutIterator<'rb, T, RB> {}
impl<'rb, T: 'rb, RB: RingBuffer<T> + 'rb> DoubleEndedIterator
for RingBufferMutIterator<'rb, T, RB>
{
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if self.len > 0 && self.index < self.len {
self.len -= 1;
let res = unsafe { RB::ptr_get_mut(self.obj.as_ptr(), self.len) };
res.map(|i| unsafe { &mut *i })
} else {
None
}
}
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.len = self.len - n.min(self.len);
self.next_back()
}
}
impl<'rb, T, RB: RingBuffer<T> + 'rb> Iterator for RingBufferMutIterator<'rb, T, RB> {
type Item = &'rb mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let res = unsafe { RB::ptr_get_mut(self.obj.as_ptr(), self.index) };
self.index += 1;
// Safety: ptr_get_mut always returns a valid pointer
res.map(|i| unsafe { &mut *i })
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len, Some(self.len))
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.index = (self.index + n).min(self.len);
self.next()
}
}
/// `RingBufferMutIterator` holds a reference to a `RingBuffer` and iterates over it.
pub struct RingBufferDrainingIterator<'rb, T, RB: RingBuffer<T>> {
obj: &'rb mut RB,
phantom: PhantomData<T>,
}
impl<'rb, T, RB: RingBuffer<T>> RingBufferDrainingIterator<'rb, T, RB> {
#[inline]
pub fn new(obj: &'rb mut RB) -> Self {
Self {
obj,
phantom: PhantomData,
}
}
}
impl<T, RB: RingBuffer<T>> Iterator for RingBufferDrainingIterator<'_, T, RB> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.obj.dequeue()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.obj.len(), Some(self.obj.len()))
}
}
/// `RingBufferIntoIterator` holds a `RingBuffer` and iterates over it.
pub struct RingBufferIntoIterator<T, RB: RingBuffer<T>> {
obj: RB,
phantom: PhantomData<T>,
}
impl<T, RB: RingBuffer<T>> RingBufferIntoIterator<T, RB> {
#[inline]
pub fn new(obj: RB) -> Self {
Self {
obj,
phantom: PhantomData,
}
}
}
impl<T, RB: RingBuffer<T>> Iterator for RingBufferIntoIterator<T, RB> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.obj.dequeue()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.obj.len(), Some(self.obj.len()))
}
}
}
pub use iter::{
RingBufferDrainingIterator, RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator,
};
/// Implement various functions on implementors of [`RingBuffer`].
/// This is to avoid duplicate code.
macro_rules! impl_ringbuffer {
($readptr: ident, $writeptr: ident) => {
#[inline]
unsafe fn ptr_len(rb: *const Self) -> usize {
(*rb).$writeptr - (*rb).$readptr
}
};
}
/// Implement various functions on implementors of [`RingBuffer`].
/// This is to avoid duplicate code.
macro_rules! impl_ringbuffer_ext {
($get_base_ptr: ident, $get_base_mut_ptr: ident, $get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => {
#[inline]
fn get_signed(&self, index: isize) -> Option<&T> {
use core::ops::Not;
self.is_empty().not().then(move || {
let index_from_readptr = if index >= 0 {
index
} else {
self.len() as isize + index
};
let normalized_index =
self.$readptr as isize + index_from_readptr.rem_euclid(self.len() as isize);
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
$get_unchecked(self, $mask(self.buffer_size(), normalized_index as usize))
}
})
}
#[inline]
fn get(&self, index: usize) -> Option<&T> {
use core::ops::Not;
self.is_empty().not().then(move || {
let normalized_index = self.$readptr + index.rem_euclid(self.len());
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
$get_unchecked(self, $mask(self.buffer_size(), normalized_index))
}
})
}
#[inline]
#[doc(hidden)]
unsafe fn ptr_get_mut_signed(rb: *mut Self, index: isize) -> Option<*mut T> {
(Self::ptr_len(rb) != 0).then(move || {
let index_from_readptr = if index >= 0 {
index
} else {
Self::ptr_len(rb) as isize + index
};
let normalized_index = (*rb).$readptr as isize
+ index_from_readptr.rem_euclid(Self::ptr_len(rb) as isize);
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
$get_unchecked_mut(
rb,
$mask(Self::ptr_buffer_size(rb), normalized_index as usize),
)
}
})
}
#[inline]
#[doc(hidden)]
unsafe fn ptr_get_mut(rb: *mut Self, index: usize) -> Option<*mut T> {
(Self::ptr_len(rb) != 0).then(move || {
let normalized_index = (*rb).$readptr + index.rem_euclid(Self::ptr_len(rb));
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
$get_unchecked_mut(rb, $mask(Self::ptr_buffer_size(rb), normalized_index))
}
})
}
#[inline]
fn clear(&mut self) {
for i in self.drain() {
drop(i);
}
self.$readptr = 0;
self.$writeptr = 0;
}
unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
where
T: Copy,
{
let len = Self::ptr_len(rb);
let dst_len = dst.len();
assert!(
(offset == 0 && len == 0) || offset < len,
"offset ({offset}) is out of bounds for the current buffer length ({len})"
);
assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
if dst_len == 0 {
return;
}
let base: *const T = $get_base_ptr(rb);
let size = Self::ptr_buffer_size(rb);
let offset_readptr = (*rb).$readptr + offset;
let from_idx = $mask(size, offset_readptr);
let to_idx = $mask(size, offset_readptr + dst_len);
if from_idx < to_idx {
dst.copy_from_slice(unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts(base.add(from_idx), dst_len)
});
} else {
dst[..size - from_idx].copy_from_slice(unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts(base.add(from_idx), size - from_idx)
});
dst[size - from_idx..].copy_from_slice(unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts(base, to_idx)
});
}
}
unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
where
T: Copy,
{
let len = Self::ptr_len(rb);
let src_len = src.len();
assert!(
(offset == 0 && len == 0) || offset < len,
"offset ({offset}) is out of bounds for the current buffer length ({len})"
);
assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
if src_len == 0 {
return;
}
let base: *mut T = $get_base_mut_ptr(rb);
let size = Self::ptr_buffer_size(rb);
let offset_readptr = (*rb).$readptr + offset;
let from_idx = $mask(size, offset_readptr);
let to_idx = $mask(size, offset_readptr + src_len);
if from_idx < to_idx {
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts_mut(base.add(from_idx), src_len)
}
.copy_from_slice(src);
} else {
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts_mut(base.add(from_idx), size - from_idx)
}
.copy_from_slice(&src[..size - from_idx]);
unsafe {
// SAFETY: index has been modulo-ed to be within range
// to be within bounds
core::slice::from_raw_parts_mut(base, to_idx)
}
.copy_from_slice(&src[size - from_idx..]);
}
}
};
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/with_alloc/alloc_ringbuffer.rs | src/with_alloc/alloc_ringbuffer.rs | use core::ops::{Index, IndexMut};
use crate::ringbuffer_trait::{
RingBuffer, RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator,
};
extern crate alloc;
// We need boxes, so depend on alloc
use crate::{impl_ring_buffer_set_len, mask_and, GrowableAllocRingBuffer, SetLen};
use core::ptr;
/// The `AllocRingBuffer` is a `RingBuffer` which is based on a Vec. This means it allocates at runtime
/// on the heap, and therefore needs the [`alloc`] crate. This struct and therefore the dependency on
/// alloc can be disabled by disabling the `alloc` (default) feature.
///
/// # Example
/// ```
/// use ringbuffer::{AllocRingBuffer, RingBuffer};
///
/// let mut buffer = AllocRingBuffer::new(2);
///
/// // First entry of the buffer is now 5.
/// buffer.enqueue(5);
///
/// // The last item we enqueued is 5
/// assert_eq!(buffer.back(), Some(&5));
///
/// // Second entry is now 42.
/// buffer.enqueue(42);
///
/// assert_eq!(buffer.peek(), Some(&5));
/// assert!(buffer.is_full());
///
/// // Because capacity is reached the next enqueue will be the first item of the buffer.
/// buffer.enqueue(1);
/// assert_eq!(buffer.to_vec(), vec![42, 1]);
/// ```
#[derive(Debug)]
pub struct AllocRingBuffer<T> {
pub(crate) buf: *mut T,
// the size of the allocation. Next power of 2 up from the capacity
size: usize,
// maximum number of elements actually allowed in the ringbuffer.
// Always less than or equal than the size
capacity: usize,
readptr: usize,
writeptr: usize,
}
// SAFETY: all methods that require mutable access take &mut,
// being send and sync was the old behavior but broke when we switched to *mut T.
unsafe impl<T: Sync> Sync for AllocRingBuffer<T> {}
unsafe impl<T: Send> Send for AllocRingBuffer<T> {}
impl<T, const N: usize> From<[T; N]> for AllocRingBuffer<T> {
fn from(value: [T; N]) -> Self {
let mut rb = Self::new(value.len());
rb.extend(value);
rb
}
}
impl<T: Clone, const N: usize> From<&[T; N]> for AllocRingBuffer<T> {
// the cast here is actually not trivial
#[allow(trivial_casts)]
fn from(value: &[T; N]) -> Self {
Self::from(value as &[T])
}
}
impl<T: Clone> From<&[T]> for AllocRingBuffer<T> {
fn from(value: &[T]) -> Self {
let mut rb = Self::new(value.len());
rb.extend(value.iter().cloned());
rb
}
}
impl<T> From<GrowableAllocRingBuffer<T>> for AllocRingBuffer<T> {
fn from(mut v: GrowableAllocRingBuffer<T>) -> AllocRingBuffer<T> {
let mut rb = AllocRingBuffer::new(v.len());
rb.extend(v.drain());
rb
}
}
impl<T: Clone> From<&mut [T]> for AllocRingBuffer<T> {
fn from(value: &mut [T]) -> Self {
Self::from(&*value)
}
}
impl<T: Clone, const CAP: usize> From<&mut [T; CAP]> for AllocRingBuffer<T> {
fn from(value: &mut [T; CAP]) -> Self {
Self::from(value.clone())
}
}
impl<T> From<alloc::vec::Vec<T>> for AllocRingBuffer<T> {
fn from(value: alloc::vec::Vec<T>) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value);
res
}
}
impl<T> From<alloc::collections::VecDeque<T>> for AllocRingBuffer<T> {
fn from(value: alloc::collections::VecDeque<T>) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value);
res
}
}
impl<T> From<alloc::collections::LinkedList<T>> for AllocRingBuffer<T> {
fn from(value: alloc::collections::LinkedList<T>) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value);
res
}
}
impl From<alloc::string::String> for AllocRingBuffer<char> {
fn from(value: alloc::string::String) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value.chars());
res
}
}
impl From<&str> for AllocRingBuffer<char> {
fn from(value: &str) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value.chars());
res
}
}
impl<T, const CAP: usize> From<crate::ConstGenericRingBuffer<T, CAP>> for AllocRingBuffer<T> {
fn from(mut value: crate::ConstGenericRingBuffer<T, CAP>) -> Self {
let mut res = AllocRingBuffer::new(value.len());
res.extend(value.drain());
res
}
}
impl<T> Drop for AllocRingBuffer<T> {
fn drop(&mut self) {
self.drain().for_each(drop);
let layout = alloc::alloc::Layout::array::<T>(self.size).unwrap();
unsafe {
alloc::alloc::dealloc(self.buf.cast(), layout);
}
}
}
impl<T: Clone> Clone for AllocRingBuffer<T> {
fn clone(&self) -> Self {
debug_assert_ne!(self.capacity, 0);
let mut new = Self::new(self.capacity);
new.extend(self.iter().cloned());
new
}
}
impl<T: PartialEq> PartialEq for AllocRingBuffer<T> {
fn eq(&self, other: &Self) -> bool {
self.capacity == other.capacity
&& self.len() == other.len()
&& self.iter().zip(other.iter()).all(|(a, b)| a == b)
}
}
impl<T: Eq + PartialEq> Eq for AllocRingBuffer<T> {}
impl<T> IntoIterator for AllocRingBuffer<T> {
type Item = T;
type IntoIter = RingBufferIntoIterator<T, Self>;
fn into_iter(self) -> Self::IntoIter {
RingBufferIntoIterator::new(self)
}
}
#[allow(clippy::into_iter_without_iter)]
// iter() is implemented on the trait
impl<'a, T> IntoIterator for &'a AllocRingBuffer<T> {
type Item = &'a T;
type IntoIter = RingBufferIterator<'a, T, AllocRingBuffer<T>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
#[allow(clippy::into_iter_without_iter)]
// iter_mut() is implemented on the trait
impl<'a, T> IntoIterator for &'a mut AllocRingBuffer<T> {
type Item = &'a mut T;
type IntoIter = RingBufferMutIterator<'a, T, AllocRingBuffer<T>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<T> Extend<T> for AllocRingBuffer<T> {
fn extend<A: IntoIterator<Item = T>>(&mut self, iter: A) {
let iter = iter.into_iter();
for i in iter {
let _ = self.enqueue(i);
}
}
}
unsafe impl<T> RingBuffer<T> for AllocRingBuffer<T> {
#[inline]
unsafe fn ptr_capacity(rb: *const Self) -> usize {
(*rb).capacity
}
#[inline]
unsafe fn ptr_buffer_size(rb: *const Self) -> usize {
(*rb).size
}
impl_ringbuffer!(readptr, writeptr);
#[inline]
fn enqueue(&mut self, value: T) -> Option<T> {
let mut ret = None;
if self.is_full() {
// mask with and is allowed here because size is always a power of two
let previous_value =
unsafe { ptr::read(get_unchecked_mut(self, mask_and(self.size, self.readptr))) };
ret = Some(previous_value);
self.readptr += 1;
}
// mask with and is allowed here because size is always a power of two
let index = mask_and(self.size, self.writeptr);
unsafe {
ptr::write(get_unchecked_mut(self, index), value);
}
self.writeptr += 1;
ret
}
fn dequeue(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
// mask with and is allowed here because size is always a power of two
let index = mask_and(self.size, self.readptr);
let res = unsafe { get_unchecked_mut(self, index) };
self.readptr += 1;
// Safety: the fact that we got this maybeuninit from the buffer (with mask) means that
// it's initialized. If it wasn't the is_empty call would have caught it. Values
// are always initialized when inserted so this is safe.
unsafe { Some(ptr::read(res)) }
}
}
impl_ringbuffer_ext!(
get_base_ptr,
get_base_mut_ptr,
get_unchecked,
get_unchecked_mut,
readptr,
writeptr,
mask_and
);
#[inline]
fn fill_with<F: FnMut() -> T>(&mut self, mut f: F) {
self.clear();
self.readptr = 0;
self.writeptr = self.capacity;
for i in 0..self.capacity {
unsafe { ptr::write(get_unchecked_mut(self, i), f()) };
}
}
}
impl<T> AllocRingBuffer<T> {
/// Creates a `AllocRingBuffer` with a certain capacity. The actual capacity is the input to the
/// function raised to the power of two (effectively the input is the log2 of the actual capacity)
#[inline]
#[must_use]
pub fn with_capacity_power_of_2(cap_power_of_two: usize) -> Self {
Self::new(1 << cap_power_of_two)
}
#[inline]
/// Alias of [`with_capacity`](AllocRingBuffer::new).
#[must_use]
#[deprecated = "alias of new"]
pub fn with_capacity(cap: usize) -> Self {
Self::new(cap)
}
/// Creates a `AllocRingBuffer` with a certain capacity. The capacity must not be zero.
///
/// # Panics
/// Panics when capacity is zero
#[inline]
#[must_use]
pub fn new(capacity: usize) -> Self {
assert_ne!(capacity, 0, "Capacity must be greater than 0");
let size = capacity.next_power_of_two();
let layout = alloc::alloc::Layout::array::<T>(size).unwrap();
let buf = unsafe { alloc::alloc::alloc(layout).cast() };
Self {
buf,
size,
capacity,
readptr: 0,
writeptr: 0,
}
}
}
/// Get a const pointer to the buffer
unsafe fn get_base_ptr<T>(rb: *const AllocRingBuffer<T>) -> *const T {
(*rb).buf.cast()
}
/// Get a mut pointer to the buffer
unsafe fn get_base_mut_ptr<T>(rb: *mut AllocRingBuffer<T>) -> *mut T {
(*rb).buf
}
/// Get a reference from the buffer without checking it is initialized.
///
/// Caller must be sure the index is in bounds, or this will panic.
#[inline]
unsafe fn get_unchecked<'a, T>(rb: *const AllocRingBuffer<T>, index: usize) -> &'a T {
let p = (*rb).buf.add(index);
// Safety: caller makes sure the index is in bounds for the ringbuffer.
// All in bounds values in the ringbuffer are initialized
&*p
}
/// Get a mut reference from the buffer without checking it is initialized.
///
/// Caller must be sure the index is in bounds, or this will panic.
#[inline]
unsafe fn get_unchecked_mut<T>(rb: *mut AllocRingBuffer<T>, index: usize) -> *mut T {
let p = (*rb).buf.add(index);
// Safety: caller makes sure the index is in bounds for the ringbuffer.
// All in bounds values in the ringbuffer are initialized
p.cast()
}
impl<T> Index<usize> for AllocRingBuffer<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("index out of bounds")
}
}
impl<T> IndexMut<usize> for AllocRingBuffer<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("index out of bounds")
}
}
impl<T> SetLen for AllocRingBuffer<T> {
impl_ring_buffer_set_len!(readptr, writeptr);
}
#[cfg(test)]
mod tests {
use crate::{AllocRingBuffer, RingBuffer};
// just test that this compiles
#[test]
fn test_generic_clone() {
fn helper(a: &AllocRingBuffer<i32>) -> AllocRingBuffer<i32> {
a.clone()
}
_ = helper(&AllocRingBuffer::new(2));
_ = helper(&AllocRingBuffer::new(5));
}
#[test]
fn test_not_power_of_two() {
let mut rb = AllocRingBuffer::new(10);
const NUM_VALS: usize = 1000;
// recycle the ringbuffer a bunch of time to see if noneof the logic
// messes up
for _ in 0..100 {
for i in 0..NUM_VALS {
let _ = rb.enqueue(i);
}
assert!(rb.is_full());
for i in 0..10 {
assert_eq!(Some(i + NUM_VALS - rb.capacity()), rb.dequeue());
}
assert!(rb.is_empty());
}
}
#[test]
fn test_with_capacity_power_of_two() {
let b = AllocRingBuffer::<i32>::with_capacity_power_of_2(2);
assert_eq!(b.capacity, 4);
}
#[test]
#[should_panic]
fn test_index_zero_length() {
let b = AllocRingBuffer::<i32>::new(2);
let _ = b[2];
}
#[test]
fn test_extend() {
let mut buf = AllocRingBuffer::<u8>::new(4);
(0..4).for_each(|_| {
let _ = buf.enqueue(0);
});
let new_data = [0, 1, 2];
buf.extend(new_data);
let expected = [0, 0, 1, 2];
for i in 0..4 {
let actual = buf[i];
let expected = expected[i];
assert_eq!(actual, expected);
}
}
#[test]
fn test_extend_with_overflow() {
let mut buf = AllocRingBuffer::<u8>::new(8);
(0..8).for_each(|_| {
let _ = buf.enqueue(0);
});
let new_data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
buf.extend(new_data);
let expected = [2, 3, 4, 5, 6, 7, 8, 9];
for i in 0..8 {
let actual = buf[i];
let expected = expected[i];
assert_eq!(actual, expected);
}
}
#[test]
fn test_conversions() {
// from &[T]
let data: &[i32] = &[1, 2, 3, 4];
let buf = AllocRingBuffer::from(data);
assert_eq!(buf.capacity, 4);
assert_eq!(buf.to_vec(), alloc::vec![1, 2, 3, 4]);
// from &[T; N]
let buf = AllocRingBuffer::from(&[1, 2, 3, 4]);
assert_eq!(buf.capacity, 4);
assert_eq!(buf.to_vec(), alloc::vec![1, 2, 3, 4]);
// from [T; N]
let buf = AllocRingBuffer::from([1, 2, 3, 4]);
assert_eq!(buf.capacity, 4);
assert_eq!(buf.to_vec(), alloc::vec![1, 2, 3, 4]);
}
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/with_alloc/vecdeque.rs | src/with_alloc/vecdeque.rs | use crate::ringbuffer_trait::{RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator};
use crate::{AllocRingBuffer, RingBuffer};
use alloc::collections::VecDeque;
use core::ops::{Deref, DerefMut, Index, IndexMut};
/// A growable ringbuffer. Once capacity is reached, the size is doubled.
/// Wrapper of the built-in [`VecDeque`] struct.
///
/// The reason this is a wrapper, is that we want `RingBuffers` to implement `Index<isize>`,
/// which we cannot do for remote types like `VecDeque`
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GrowableAllocRingBuffer<T>(VecDeque<T>);
impl<T, const N: usize> From<[T; N]> for GrowableAllocRingBuffer<T> {
fn from(value: [T; N]) -> Self {
Self(VecDeque::from(value))
}
}
impl<T> From<VecDeque<T>> for GrowableAllocRingBuffer<T> {
fn from(value: VecDeque<T>) -> Self {
Self(value)
}
}
impl<T: Clone, const N: usize> From<&[T; N]> for GrowableAllocRingBuffer<T> {
// the cast here is actually not trivial
#[allow(trivial_casts)]
fn from(value: &[T; N]) -> Self {
Self::from(value as &[T])
}
}
impl<T: Clone> From<&[T]> for GrowableAllocRingBuffer<T> {
fn from(value: &[T]) -> Self {
let mut rb = Self::new();
rb.extend(value.iter().cloned());
rb
}
}
impl<T> From<AllocRingBuffer<T>> for GrowableAllocRingBuffer<T> {
fn from(mut v: AllocRingBuffer<T>) -> GrowableAllocRingBuffer<T> {
let mut rb = GrowableAllocRingBuffer::new();
rb.extend(v.drain());
rb
}
}
impl<T: Clone> From<&mut [T]> for GrowableAllocRingBuffer<T> {
fn from(value: &mut [T]) -> Self {
Self::from(&*value)
}
}
impl<T: Clone, const CAP: usize> From<&mut [T; CAP]> for GrowableAllocRingBuffer<T> {
fn from(value: &mut [T; CAP]) -> Self {
Self::from(value.clone())
}
}
impl<T> From<alloc::vec::Vec<T>> for GrowableAllocRingBuffer<T> {
fn from(value: alloc::vec::Vec<T>) -> Self {
let mut res = GrowableAllocRingBuffer::new();
res.extend(value);
res
}
}
impl<T> From<alloc::collections::LinkedList<T>> for GrowableAllocRingBuffer<T> {
fn from(value: alloc::collections::LinkedList<T>) -> Self {
let mut res = GrowableAllocRingBuffer::new();
res.extend(value);
res
}
}
impl From<alloc::string::String> for GrowableAllocRingBuffer<char> {
fn from(value: alloc::string::String) -> Self {
let mut res = GrowableAllocRingBuffer::new();
res.extend(value.chars());
res
}
}
impl From<&str> for GrowableAllocRingBuffer<char> {
fn from(value: &str) -> Self {
let mut res = GrowableAllocRingBuffer::new();
res.extend(value.chars());
res
}
}
impl<T, const CAP: usize> From<crate::ConstGenericRingBuffer<T, CAP>>
for GrowableAllocRingBuffer<T>
{
fn from(mut value: crate::ConstGenericRingBuffer<T, CAP>) -> Self {
let mut res = GrowableAllocRingBuffer::new();
res.extend(value.drain());
res
}
}
impl<T> Deref for GrowableAllocRingBuffer<T> {
type Target = VecDeque<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for GrowableAllocRingBuffer<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T> Default for GrowableAllocRingBuffer<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> AsRef<VecDeque<T>> for GrowableAllocRingBuffer<T> {
fn as_ref(&self) -> &VecDeque<T> {
&self.0
}
}
impl<T> GrowableAllocRingBuffer<T> {
/// Creates an empty ringbuffer.
#[must_use]
pub fn new() -> Self {
Self(VecDeque::new())
}
/// Creates an empty ringbuffer with space for at least capacity elements.
#[must_use]
pub fn with_capacity(capacity: usize) -> Self {
Self(VecDeque::with_capacity(capacity))
}
}
impl<T> IntoIterator for GrowableAllocRingBuffer<T> {
type Item = T;
type IntoIter = RingBufferIntoIterator<T, Self>;
fn into_iter(self) -> Self::IntoIter {
RingBufferIntoIterator::new(self)
}
}
impl<'a, T> IntoIterator for &'a GrowableAllocRingBuffer<T> {
type Item = &'a T;
type IntoIter = RingBufferIterator<'a, T, GrowableAllocRingBuffer<T>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T> IntoIterator for &'a mut GrowableAllocRingBuffer<T> {
type Item = &'a mut T;
type IntoIter = RingBufferMutIterator<'a, T, GrowableAllocRingBuffer<T>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
unsafe impl<T> RingBuffer<T> for GrowableAllocRingBuffer<T> {
unsafe fn ptr_len(rb: *const Self) -> usize {
(*rb).0.len()
}
#[inline]
unsafe fn ptr_capacity(rb: *const Self) -> usize {
(*rb).0.capacity()
}
#[inline]
unsafe fn ptr_buffer_size(rb: *const Self) -> usize {
(*rb).0.capacity()
}
fn dequeue(&mut self) -> Option<T> {
self.pop_front()
}
fn enqueue(&mut self, value: T) -> Option<T> {
self.push_back(value);
None
}
fn fill_with<F: FnMut() -> T>(&mut self, mut f: F) {
self.clear();
let initial_capacity = self.0.capacity();
for _ in 0..initial_capacity {
self.0.push_back(f());
}
debug_assert_eq!(initial_capacity, self.0.capacity());
}
fn clear(&mut self) {
self.0.clear();
}
fn get(&self, index: usize) -> Option<&T> {
if self.is_empty() {
None
} else {
self.0.get(crate::mask_modulo(self.0.len(), index))
}
}
fn get_signed(&self, index: isize) -> Option<&T> {
if self.is_empty() {
None
} else if index >= 0 {
self.0
.get(crate::mask_modulo(self.0.len(), index.unsigned_abs()))
} else {
let positive_index = index.unsigned_abs() - 1;
let masked = crate::mask_modulo(self.0.len(), positive_index);
let index = self.0.len() - 1 - masked;
self.0.get(index)
}
}
unsafe fn ptr_get_mut_signed(rb: *mut Self, index: isize) -> Option<*mut T> {
#[allow(trivial_casts)]
if RingBuffer::ptr_len(rb) == 0 {
None
} else if index >= 0 {
(*rb).0.get_mut(index.unsigned_abs())
} else {
let len = Self::ptr_len(rb);
let positive_index = index.unsigned_abs() + 1;
let masked = crate::mask_modulo(len, positive_index);
let index = len - 1 - masked;
(*rb).0.get_mut(index)
}
.map(|i| i as *mut T)
}
unsafe fn ptr_get_mut(rb: *mut Self, index: usize) -> Option<*mut T> {
#[allow(trivial_casts)]
if RingBuffer::ptr_len(rb) == 0 {
None
} else {
(*rb).0.get_mut(index)
}
.map(|i| i as *mut T)
}
unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
where
T: Copy,
{
let len = Self::ptr_len(rb);
let dst_len = dst.len();
assert!(
(offset == 0 && len == 0) || offset < len,
"offset ({offset}) is out of bounds for the current buffer length ({len})"
);
assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
if dst_len == 0 {
return;
}
let (front, back) = (*rb).0.as_slices();
let first_len = front.len();
if offset < first_len {
let n_in_first = first_len - offset;
dst[..n_in_first].copy_from_slice(&front[offset..]);
if n_in_first < dst_len {
dst[n_in_first..].copy_from_slice(&back[..dst_len - n_in_first]);
}
} else {
dst.copy_from_slice(&back[offset - first_len..]);
}
}
unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
where
T: Copy,
{
let len = Self::ptr_len(rb);
let src_len = src.len();
assert!(
(offset == 0 && len == 0) || offset < len,
"offset ({offset}) is out of bounds for the current buffer length ({len})"
);
assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
if src_len == 0 {
return;
}
let (front, back) = (*rb).0.as_mut_slices();
let first_len = front.len();
if offset < first_len {
let n_in_first = first_len - offset;
front[offset..].copy_from_slice(&src[..n_in_first]);
if n_in_first < src_len {
back[..src_len - n_in_first].copy_from_slice(&src[n_in_first..]);
}
} else {
back[offset - first_len..].copy_from_slice(src);
}
}
}
impl<T> Extend<T> for GrowableAllocRingBuffer<T> {
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
self.0.extend(iter);
}
}
impl<T> Index<usize> for GrowableAllocRingBuffer<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("index out of bounds")
}
}
impl<T> IndexMut<usize> for GrowableAllocRingBuffer<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("index out of bounds")
}
}
impl<T> FromIterator<T> for GrowableAllocRingBuffer<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
Self(VecDeque::from_iter(iter))
}
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/src/with_alloc/mod.rs | src/with_alloc/mod.rs | pub mod alloc_ringbuffer;
pub mod vecdeque;
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/tests/conversions.rs | tests/conversions.rs | extern crate alloc;
use alloc::collections::{LinkedList, VecDeque};
use alloc::string::ToString;
use core::ops::Deref;
use ringbuffer::RingBuffer;
use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer};
use std::vec;
macro_rules! convert_test {
($name: ident: $from: expr => $to: ty) => {
#[test]
fn $name() {
let a = $from;
let mut b: $to = a.into();
assert_eq!(b.to_vec(), vec!['1', '2']);
b.enqueue('3');
assert_eq!(b, b);
}
};
}
macro_rules! convert_tests {
(
[$($name: ident: $from: expr),* $(,)?]
=> $to: ty
) => {
$(
convert_test!($name: $from => $to);
)*
};
}
convert_tests!(
[
alloc_from_vec: vec!['1', '2'],
alloc_from_ll: {let mut l = LinkedList::new(); l.push_back('1'); l.push_back('2'); l},
alloc_from_vd: {let mut l = VecDeque::new(); l.push_back('1'); l.push_back('2'); l},
alloc_from_str: "12".to_string(),
alloc_from_str_slice: "12",
alloc_from_slice: {let a: &[char] = &['1', '2']; a},
alloc_from_const_slice: {let a: &[char; 2] = &['1', '2']; a},
alloc_from_arr: {let a: [char; 2] = ['1', '2']; a},
alloc_from_cgrb: { ConstGenericRingBuffer::from(['1', '2'])},
alloc_from_garb: { GrowableAllocRingBuffer::from(['1', '2'])},
] => AllocRingBuffer::<_>
);
convert_tests!(
[
growable_alloc_from_vec: vec!['1', '2'],
growable_alloc_from_ll: {let mut l = LinkedList::new(); l.push_back('1'); l.push_back('2'); l},
growable_alloc_from_vd: {let mut l = VecDeque::new(); l.push_back('1'); l.push_back('2'); l},
growable_alloc_from_str: "12".to_string(),
growable_alloc_from_str_slice: "12",
growable_alloc_from_slice: {let a: &[char] = &['1', '2']; a},
growable_alloc_from_const_slice: {let a: &[char; 2] = &['1', '2']; a},
growable_alloc_from_arr: {let a: [char; 2] = ['1', '2']; a},
growable_alloc_from_cgrb: { ConstGenericRingBuffer::from(['1', '2'])},
growable_alloc_from_arb: { AllocRingBuffer::from(['1', '2'])},
] => GrowableAllocRingBuffer::<_>
);
convert_tests!(
[
const_from_vec: vec!['1', '2'],
const_from_ll: {let mut l = LinkedList::new(); l.push_back('1'); l.push_back('2'); l},
const_from_vd: {let mut l = VecDeque::new(); l.push_back('1'); l.push_back('2'); l},
const_from_str: "12".to_string(),
const_from_str_slice: "12",
const_from_slice: {let a: &[char] = &['1', '2']; a},
const_from_const_slice: {let a: &[char; 2] = &['1', '2']; a},
const_from_arr: {let a: [char; 2] = ['1', '2']; a},
const_from_garb: { GrowableAllocRingBuffer::from(['1', '2'])},
const_from_arb: { AllocRingBuffer::from(['1', '2'])},
] => ConstGenericRingBuffer::<_, 2>
);
#[test]
fn test_extra_conversions_growable() {
let a: &mut [i32; 2] = &mut [1, 2];
let a = GrowableAllocRingBuffer::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
let a: &mut [i32] = &mut [1, 2];
let a = GrowableAllocRingBuffer::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
let mut b = VecDeque::<i32>::new();
b.push_back(1);
b.push_back(2);
assert_eq!(a.deref(), &b);
assert_eq!(a.as_ref(), &b);
}
#[test]
fn test_extra_conversions_alloc() {
let a: &mut [i32; 2] = &mut [1, 2];
let a = AllocRingBuffer::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
let a: &mut [i32] = &mut [1, 2];
let a = AllocRingBuffer::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
}
#[test]
fn test_extra_conversions_const() {
let a: &mut [i32; 2] = &mut [1, 2];
let a = ConstGenericRingBuffer::<_, 2>::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
let a: &mut [i32] = &mut [1, 2];
let a = ConstGenericRingBuffer::<_, 2>::from(a);
assert_eq!(a.to_vec(), vec![1, 2]);
}
#[test]
fn test_const_generic_new_parameter() {
// Can we specify size only on the method?
let mut a = ConstGenericRingBuffer::new::<2>();
let _ = a.enqueue(5);
// Can we specify size in both positions?
let mut a = ConstGenericRingBuffer::<i32, 50>::new::<50>();
let _ = a.enqueue(5);
// Can we specify size only on the struct?
let mut a = ConstGenericRingBuffer::<i32, 50>::new();
let _ = a.enqueue(5);
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/tests/compiletests.rs | tests/compiletests.rs | extern crate compiletest_rs as compiletest;
use std::path::PathBuf;
#[cfg(test)]
mod conversions;
fn run_mode(mode: &'static str) {
let mut config = compiletest::Config::default();
config.mode = mode.parse().expect("Invalid mode");
config.src_base = PathBuf::from(format!("tests/{}", mode));
config.link_deps(); // Populate config.target_rustcflags with dependencies on the path
config.clean_rmeta(); // If your tests import the parent crate, this helps with E0464
compiletest::run_tests(&config);
}
#[test]
#[cfg_attr(miri, ignore)]
fn compile_test() {
run_mode("compile-fail");
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/tests/compile-fail/test_const_generic_array_zero_length.rs | tests/compile-fail/test_const_generic_array_zero_length.rs | extern crate ringbuffer;
use ringbuffer::ConstGenericRingBuffer;
fn main() {
let _ = ConstGenericRingBuffer::<i32, 0>::new();
//~^ note: the above error was encountered while instantiating `fn ringbuffer::ConstGenericRingBuffer::<i32, 0>::new::<0>`
// ringbuffer can't be zero length
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/tests/compile-fail/test_const_generic_array_zero_length_new.rs | tests/compile-fail/test_const_generic_array_zero_length_new.rs | extern crate ringbuffer;
use ringbuffer::{ConstGenericRingBuffer, RingBuffer};
fn main() {
let mut buf = ConstGenericRingBuffer::new::<0>();
//~^ note: the above error was encountered while instantiating `fn ringbuffer::ConstGenericRingBuffer::<i32, 0>::new::<0>`
// ringbuffer can't be zero length
let _ = buf.enqueue(5);
}
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
NULLx76/ringbuffer | https://github.com/NULLx76/ringbuffer/blob/8aaaec89b0da0cc2d2508ade813557758c3d5041/benches/bench.rs | benches/bench.rs | #![feature(coverage_attribute)]
#![coverage(off)]
use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion};
use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, RingBuffer, SetLen};
fn benchmark_push<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F) {
b.iter(|| {
let mut rb = new();
for i in 0..1_000_000 {
rb.enqueue(i);
black_box(());
}
rb
})
}
fn benchmark_push_dequeue<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F) {
b.iter(|| {
let mut rb = new();
for _i in 0..100_000 {
let _ = rb.enqueue(1);
black_box(());
let _ = rb.enqueue(2);
black_box(());
assert_eq!(black_box(rb.dequeue()), Some(1));
assert_eq!(black_box(rb.dequeue()), Some(2));
let _ = rb.enqueue(1);
black_box(());
let _ = rb.enqueue(2);
black_box(());
assert_eq!(black_box(rb.dequeue()), Some(1));
assert_eq!(black_box(rb.dequeue()), Some(2));
let _ = rb.enqueue(1);
black_box(());
let _ = rb.enqueue(2);
black_box(());
assert_eq!(black_box(rb.get_signed(-1)), Some(&2));
assert_eq!(black_box(rb.get_signed(-2)), Some(&1));
}
rb
})
}
fn benchmark_various<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F) {
b.iter(|| {
let mut rb = new();
for i in 0..100_000 {
rb.enqueue(i);
black_box(());
black_box(rb.back());
}
rb
})
}
fn benchmark_skip<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F) {
let mut rb = new();
rb.fill(9);
b.iter(|| {
for i in 0..rb.len() {
assert_eq!(rb.iter().skip(i).next(), Some(&9));
}
})
}
fn benchmark_copy_to_slice_vs_extend<T: RingBuffer<i32>, F: Fn() -> T>(
rb_size: usize,
rb_type: &str,
fn_name: &str,
c: &mut Criterion,
new: F,
) {
let mut group = c.benchmark_group(format!("{fn_name}({rb_type}, {rb_size})"));
let mut output = vec![0; rb_size];
group.bench_function(format!("CopyTo({rb_type}; {rb_size})"), |b| {
let mut rb = new();
rb.fill(9);
// making sure the read/write pointers wrap around
for _ in 0..rb_size / 2 {
let _ = rb.dequeue();
let _ = rb.enqueue(9);
}
b.iter(|| {
rb.copy_to_slice(0, &mut output);
assert_eq!(output[output.len() / 2], 9);
assert_eq!(output.len(), rb_size);
})
});
let mut output: Vec<i32> = Vec::with_capacity(rb_size);
group.bench_function(format!("ExtendVec({rb_type}; {rb_size})"), |b| {
let mut rb = new();
rb.fill(9);
// making sure the read/write pointers wrap around
for _ in 0..rb_size / 2 {
let _ = rb.dequeue();
let _ = rb.enqueue(9);
}
b.iter(|| {
unsafe { output.set_len(0) };
output.extend(rb.iter());
assert_eq!(output[output.len() / 2], 9);
assert_eq!(output.len(), rb_size);
})
});
group.finish();
}
fn benchmark_copy_from_slice_vs_extend<T: RingBuffer<i32> + SetLen, F: Fn() -> T>(
rb_size: usize,
rb_type: &str,
fn_name: &str,
c: &mut Criterion,
new: F,
) {
let mut group = c.benchmark_group(format!("{fn_name}({rb_type}, {rb_size})"));
let input = vec![9; rb_size];
group.bench_function(format!("CopyFrom({rb_type}; {rb_size})"), |b| {
let mut rb = new();
rb.fill(0);
// making sure the read/write pointers wrap around
for _ in 0..rb_size / 2 {
let _ = rb.dequeue();
let _ = rb.enqueue(0);
}
for _ in 0..rb_size / 2 {}
b.iter(|| {
rb.copy_from_slice(0, &input);
assert_eq!(rb[rb.len() / 2], 9);
assert_eq!(rb.len(), rb_size);
})
});
group.bench_function(format!("ExtendRb({rb_type}; {rb_size})"), |b| {
let mut rb = new();
// making sure the read/write pointers wrap around
for _ in 0..rb_size / 2 {
let _ = rb.dequeue();
let _ = rb.enqueue(0);
}
b.iter(|| {
unsafe { rb.set_len(0) };
rb.extend(input.iter().copied());
assert_eq!(rb[rb.len() / 2], 9);
assert_eq!(rb.len(), rb_size);
})
});
group.finish();
}
macro_rules! generate_benches {
(called, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => {
$(
$c.bench_function(&format!("{} {} 1M capacity {}", stringify!($rb), stringify!($bmfunc), stringify!($i)), |b| $bmfunc(b, || {
$rb::<$ty>::$fn($i)
}));
)*
};
(non_power_two, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => {
$(
$c.bench_function(&format!("{} {} 1M capacity not power of two {}", stringify!($rb), stringify!($bmfunc), stringify!($i)), |b| $bmfunc(b, || {
$rb::<$ty>::$fn($i)
}));
)*
};
(typed, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => {
$(
$c.bench_function(&format!("{} {} 1M capacity {}", stringify!($rb), stringify!($bmfunc) ,stringify!($i)), |b| $bmfunc(b, || {
$rb::<$ty, $i>::$fn()
}));
)*
};
(compare, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => {
$(
$bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || {
$rb::<$ty>::$fn($i)
});
)*
};
(compare_typed, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => {
$(
$bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || {
$rb::<$ty, $i>::$fn()
});
)*
};
}
fn criterion_benchmark(c: &mut Criterion) {
// TODO: Improve benchmarks
// * What are representative operations
// * Make sure it's accurate
// * more general benchmarks but preferably less/quickjer
generate_benches![
called,
c,
AllocRingBuffer,
i32,
new,
benchmark_push,
16,
1024,
4096,
8192
];
generate_benches![
typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_push,
16,
1024,
4096,
8192
];
generate_benches![
called,
c,
AllocRingBuffer,
i32,
new,
benchmark_various,
16,
1024,
4096,
8192
];
generate_benches![
typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_various,
16,
1024,
4096,
8192
];
generate_benches![
called,
c,
AllocRingBuffer,
i32,
new,
benchmark_push_dequeue,
16,
1024,
4096,
8192
];
generate_benches![
typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_push_dequeue,
16,
1024,
4096,
8192
];
generate_benches![
non_power_two,
c,
AllocRingBuffer,
i32,
new,
benchmark_various,
16,
17,
1024,
4096,
8192,
8195
];
generate_benches![
typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_skip,
16,
1024,
4096,
8192
];
generate_benches![
called,
c,
AllocRingBuffer,
i32,
new,
benchmark_skip,
16,
17,
1024,
4096,
8192,
8195
];
generate_benches![
compare,
c,
AllocRingBuffer,
i32,
new,
benchmark_copy_to_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare_typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_copy_to_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare,
c,
AllocRingBuffer,
i32,
new,
benchmark_copy_from_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare_typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_copy_from_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare,
c,
AllocRingBuffer,
i32,
new,
benchmark_copy_to_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare_typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_copy_to_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare,
c,
AllocRingBuffer,
i32,
new,
benchmark_copy_from_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
generate_benches![
compare_typed,
c,
ConstGenericRingBuffer,
i32,
new,
benchmark_copy_from_slice_vs_extend,
16,
1024,
4096,
8192,
1_000_000,
1_048_576
];
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8aaaec89b0da0cc2d2508ade813557758c3d5041 | 2026-01-04T20:18:16.987405Z | false |
Shafin098/pakhi-bhasha | https://github.com/Shafin098/pakhi-bhasha/blob/9805017f595169a9b49c9f36d9b30bbbee3e7b28/src/lib.rs | src/lib.rs | pub mod frontend;
pub mod backend;
pub mod common;
use crate::frontend::{lexer, parser};
use crate::backend::interpreter;
use crate::common::io::IO;
use crate::common::pakhi_error::PakhiErr;
use crate::common::pakhi_error::PakhiErr::UnexpectedError;
pub fn start_pakhi<T: IO>(main_module_path: String, io: &mut T) -> Result<(), PakhiErr>{
//println!("Source file: {}", filename);
match io.read_src_code_from_file(&main_module_path) {
Ok(src_string) => {
// println!("{}", src_string);
let src_chars: Vec<char> = src_string.chars().collect();
let tokens = lexer::tokenize(src_chars, main_module_path.clone())?;
//println!("{:#?}", tokens);
let ast_tree = parser::parse(main_module_path, tokens)?;
//println!("Ast : {:#?}", ast_tree);
// println!();
// println!("Interpreter");
// println!("____________");
return interpreter::run(ast_tree);
},
Err(e) => return Err(UnexpectedError(format!("{}", e))),
}
} | rust | MIT | 9805017f595169a9b49c9f36d9b30bbbee3e7b28 | 2026-01-04T20:18:15.998668Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.