{"repo_name": "cocoindex", "file_name": "/cocoindex/src/service/flows.rs", "inference_info": {"prefix_code": "use crate::prelude::*;\n\nuse crate::execution::{evaluator, indexing_status, memoization, row_indexer, stats};\nuse crate::lib_context::LibContext;\nuse crate::{base::schema::FlowSchema, ops::interface::SourceExecutorListOptions};\nuse axum::{\n Json,\n extract::{Path, State},\n http::StatusCode,\n};\nuse axum_extra::extract::Query;\n\npub async fn list_flows(\n State(lib_context): State>,\n) -> Result>, ApiError> {\n Ok(Json(\n lib_context.flows.lock().unwrap().keys().cloned().collect(),\n ))\n}\n\npub async fn get_flow_schema(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(flow_ctx.flow.data_schema.clone()))\n}\n\n#[derive(Serialize)]\npub struct GetFlowResponse {\n flow_spec: spec::FlowInstanceSpec,\n data_schema: FlowSchema,\n fingerprint: utils::fingerprint::Fingerprint,\n}\n\n", "suffix_code": "\n\n#[derive(Deserialize)]\npub struct GetKeysParam {\n field: String,\n}\n\n#[derive(Serialize)]\npub struct GetKeysResponse {\n key_type: schema::EnrichedValueType,\n keys: Vec,\n}\n\npub async fn get_keys(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let schema = &flow_ctx.flow.data_schema;\n\n let field_idx = schema\n .fields\n .iter()\n .position(|f| f.name == query.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field not found: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let key_type = schema.fields[field_idx]\n .value_type\n .typ\n .key_type()\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field has no key: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let execution_plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = execution_plan\n .import_ops\n .iter()\n .find(|op| 
op.output.field_idx == field_idx as u32)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field is not a source: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n let mut keys = Vec::new();\n while let Some(rows) = rows_stream.next().await {\n keys.extend(rows?.into_iter().map(|row| row.key));\n }\n Ok(Json(GetKeysResponse {\n key_type: key_type.clone(),\n keys,\n }))\n}\n\n#[derive(Deserialize)]\npub struct SourceRowKeyParams {\n field: String,\n key: Vec,\n}\n\n#[derive(Serialize)]\npub struct EvaluateDataResponse {\n schema: FlowSchema,\n data: value::ScopeValue,\n}\n\nstruct SourceRowKeyContextHolder<'a> {\n plan: Arc,\n import_op_idx: usize,\n schema: &'a FlowSchema,\n key: value::KeyValue,\n}\n\nimpl<'a> SourceRowKeyContextHolder<'a> {\n async fn create(flow_ctx: &'a FlowContext, source_row_key: SourceRowKeyParams) -> Result {\n let schema = &flow_ctx.flow.data_schema;\n let import_op_idx = flow_ctx\n .flow\n .flow_instance\n .import_ops\n .iter()\n .position(|op| op.name == source_row_key.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"source field not found: {}\", source_row_key.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[import_op_idx];\n let field_schema = &schema.fields[import_op.output.field_idx as usize];\n let table_schema = match &field_schema.value_type.typ {\n schema::ValueType::Table(table) => table,\n _ => api_bail!(\"field is not a table: {}\", source_row_key.field),\n };\n let key_field = table_schema\n .key_field()\n .ok_or_else(|| api_error!(\"field {} does not have a key\", source_row_key.field))?;\n let key = value::KeyValue::from_strs(source_row_key.key, &key_field.value_type.typ)?;\n Ok(Self {\n plan,\n import_op_idx,\n schema,\n key,\n })\n }\n\n fn as_context<'b>(&'b self) -> evaluator::SourceRowEvaluationContext<'b> 
{\n evaluator::SourceRowEvaluationContext {\n plan: &self.plan,\n import_op: &self.plan.import_ops[self.import_op_idx],\n schema: self.schema,\n key: &self.key,\n import_op_idx: self.import_op_idx,\n }\n }\n}\n\npub async fn evaluate_data(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let evaluate_output = row_indexer::evaluate_source_entry_with_memory(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n memoization::EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: true,\n },\n lib_context.require_builtin_db_pool()?,\n )\n .await?\n .ok_or_else(|| {\n api_error!(\n \"value not found for source at the specified key: {key:?}\",\n key = source_row_key_ctx.key\n )\n })?;\n\n Ok(Json(EvaluateDataResponse {\n schema: flow_ctx.flow.data_schema.clone(),\n data: evaluate_output.data_scope.into(),\n }))\n}\n\npub async fn update(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow_ctx.clone(),\n lib_context.require_builtin_db_pool()?,\n execution::FlowLiveUpdaterOptions {\n live_mode: false,\n ..Default::default()\n },\n )\n .await?;\n live_updater.wait().await?;\n Ok(Json(live_updater.index_update_info()))\n}\n\npub async fn get_row_indexing_status(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let indexing_status = 
indexing_status::get_source_row_indexing_status(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n lib_context.require_builtin_db_pool()?,\n )\n .await?;\n Ok(Json(indexing_status))\n}\n", "middle_code": "pub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/cocoindex/src/builder/analyzer.rs", "use crate::builder::exec_ctx::AnalyzedSetupState;\nuse crate::ops::get_executor_factory;\nuse crate::prelude::*;\n\nuse super::plan::*;\nuse crate::lib_context::get_auth_registry;\nuse crate::utils::fingerprint::Fingerprinter;\nuse crate::{\n base::{schema::*, spec::*},\n ops::interface::*,\n};\nuse futures::future::{BoxFuture, try_join3};\nuse futures::{FutureExt, future::try_join_all};\n\n#[derive(Debug)]\npub(super) enum ValueTypeBuilder {\n Basic(BasicValueType),\n Struct(StructSchemaBuilder),\n Table(TableSchemaBuilder),\n}\n\nimpl TryFrom<&ValueType> for ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_from(value_type: &ValueType) -> Result {\n match value_type {\n ValueType::Basic(basic_type) => Ok(ValueTypeBuilder::Basic(basic_type.clone())),\n ValueType::Struct(struct_type) => Ok(ValueTypeBuilder::Struct(struct_type.try_into()?)),\n ValueType::Table(table_type) => Ok(ValueTypeBuilder::Table(table_type.try_into()?)),\n }\n }\n}\n\nimpl 
TryInto for &ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n match self {\n ValueTypeBuilder::Basic(basic_type) => Ok(ValueType::Basic(basic_type.clone())),\n ValueTypeBuilder::Struct(struct_type) => Ok(ValueType::Struct(struct_type.try_into()?)),\n ValueTypeBuilder::Table(table_type) => Ok(ValueType::Table(table_type.try_into()?)),\n }\n }\n}\n\n#[derive(Default, Debug)]\npub(super) struct StructSchemaBuilder {\n fields: Vec>,\n field_name_idx: HashMap,\n description: Option>,\n}\n\nimpl StructSchemaBuilder {\n fn add_field(&mut self, field: FieldSchema) -> Result {\n let field_idx = self.fields.len() as u32;\n match self.field_name_idx.entry(field.name.clone()) {\n std::collections::hash_map::Entry::Occupied(_) => {\n bail!(\"Field name already exists: {}\", field.name);\n }\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(field_idx);\n }\n }\n self.fields.push(field);\n Ok(field_idx)\n }\n\n pub fn find_field(&self, field_name: &'_ str) -> Option<(u32, &FieldSchema)> {\n self.field_name_idx\n .get(field_name)\n .map(|&field_idx| (field_idx, &self.fields[field_idx as usize]))\n }\n}\n\nimpl TryFrom<&StructSchema> for StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &StructSchema) -> Result {\n let mut result = StructSchemaBuilder {\n fields: Vec::with_capacity(schema.fields.len()),\n field_name_idx: HashMap::with_capacity(schema.fields.len()),\n description: schema.description.clone(),\n };\n for field in schema.fields.iter() {\n result.add_field(FieldSchema::::from_alternative(field)?)?;\n }\n Ok(result)\n }\n}\n\nimpl TryInto for &StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n Ok(StructSchema {\n fields: Arc::new(\n self.fields\n .iter()\n .map(FieldSchema::::from_alternative)\n .collect::>>()?,\n ),\n description: self.description.clone(),\n })\n }\n}\n\n#[derive(Debug)]\npub(super) struct TableSchemaBuilder {\n pub kind: TableKind,\n 
pub sub_scope: Arc>,\n}\n\nimpl TryFrom<&TableSchema> for TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &TableSchema) -> Result {\n Ok(Self {\n kind: schema.kind,\n sub_scope: Arc::new(Mutex::new(DataScopeBuilder {\n data: (&schema.row).try_into()?,\n })),\n })\n }\n}\n\nimpl TryInto for &TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n let sub_scope = self.sub_scope.lock().unwrap();\n let row = (&sub_scope.data).try_into()?;\n Ok(TableSchema {\n kind: self.kind,\n row,\n })\n }\n}\n\nfn try_make_common_value_type(\n value_type1: &EnrichedValueType,\n value_type2: &EnrichedValueType,\n) -> Result {\n let typ = match (&value_type1.typ, &value_type2.typ) {\n (ValueType::Basic(basic_type1), ValueType::Basic(basic_type2)) => {\n if basic_type1 != basic_type2 {\n api_bail!(\"Value types are not compatible: {basic_type1} vs {basic_type2}\");\n }\n ValueType::Basic(basic_type1.clone())\n }\n (ValueType::Struct(struct_type1), ValueType::Struct(struct_type2)) => {\n let common_schema = try_merge_struct_schemas(struct_type1, struct_type2)?;\n ValueType::Struct(common_schema)\n }\n (ValueType::Table(table_type1), ValueType::Table(table_type2)) => {\n if table_type1.kind != table_type2.kind {\n api_bail!(\n \"Collection types are not compatible: {} vs {}\",\n table_type1,\n table_type2\n );\n }\n let row = try_merge_struct_schemas(&table_type1.row, &table_type2.row)?;\n ValueType::Table(TableSchema {\n kind: table_type1.kind,\n row,\n })\n }\n (t1 @ (ValueType::Basic(_) | ValueType::Struct(_) | ValueType::Table(_)), t2) => {\n api_bail!(\"Unmatched types:\\n {t1}\\n {t2}\\n\",)\n }\n };\n let common_attrs: Vec<_> = value_type1\n .attrs\n .iter()\n .filter_map(|(k, v)| {\n if value_type2.attrs.get(k) == Some(v) {\n Some((k, v))\n } else {\n None\n }\n })\n .collect();\n let attrs = if common_attrs.len() == value_type1.attrs.len() {\n value_type1.attrs.clone()\n } else {\n Arc::new(\n common_attrs\n 
.into_iter()\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect(),\n )\n };\n\n Ok(EnrichedValueType {\n typ,\n nullable: value_type1.nullable || value_type2.nullable,\n attrs,\n })\n}\n\nfn try_merge_fields_schemas(\n schema1: &[FieldSchema],\n schema2: &[FieldSchema],\n) -> Result> {\n if schema1.len() != schema2.len() {\n api_bail!(\n \"Fields are not compatible as they have different fields count:\\n ({})\\n ({})\\n\",\n schema1\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \"),\n schema2\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \")\n );\n }\n let mut result_fields = Vec::with_capacity(schema1.len());\n for (field1, field2) in schema1.iter().zip(schema2.iter()) {\n if field1.name != field2.name {\n api_bail!(\n \"Structs are not compatible as they have incompatible field names `{}` vs `{}`\",\n field1.name,\n field2.name\n );\n }\n result_fields.push(FieldSchema {\n name: field1.name.clone(),\n value_type: try_make_common_value_type(&field1.value_type, &field2.value_type)?,\n });\n }\n Ok(result_fields)\n}\n\nfn try_merge_struct_schemas(\n schema1: &StructSchema,\n schema2: &StructSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(StructSchema {\n fields: Arc::new(fields),\n description: schema1\n .description\n .clone()\n .or_else(|| schema2.description.clone()),\n })\n}\n\nfn try_merge_collector_schemas(\n schema1: &CollectorSchema,\n schema2: &CollectorSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(CollectorSchema {\n fields,\n auto_uuid_field_idx: if schema1.auto_uuid_field_idx == schema2.auto_uuid_field_idx {\n schema1.auto_uuid_field_idx\n } else {\n None\n },\n })\n}\n\n#[derive(Debug)]\npub(super) struct CollectorBuilder {\n pub schema: Arc,\n pub is_used: bool,\n}\n\nimpl CollectorBuilder {\n pub fn new(schema: Arc) -> Self {\n Self {\n schema,\n is_used: false,\n }\n }\n\n pub fn merge_schema(&mut self, 
schema: &CollectorSchema) -> Result<()> {\n if self.is_used {\n api_bail!(\"Collector is already used\");\n }\n let existing_schema = Arc::make_mut(&mut self.schema);\n *existing_schema = try_merge_collector_schemas(existing_schema, schema)?;\n Ok(())\n }\n\n pub fn use_schema(&mut self) -> Arc {\n self.is_used = true;\n self.schema.clone()\n }\n}\n\n#[derive(Debug)]\npub(super) struct DataScopeBuilder {\n pub data: StructSchemaBuilder,\n}\n\nimpl DataScopeBuilder {\n pub fn new() -> Self {\n Self {\n data: Default::default(),\n }\n }\n\n pub fn last_field(&self) -> Option<&FieldSchema> {\n self.data.fields.last()\n }\n\n pub fn add_field(\n &mut self,\n name: FieldName,\n value_type: &EnrichedValueType,\n ) -> Result {\n let field_index = self.data.add_field(FieldSchema {\n name,\n value_type: EnrichedValueType::from_alternative(value_type)?,\n })?;\n Ok(AnalyzedOpOutput {\n field_idx: field_index,\n })\n }\n\n pub fn analyze_field_path<'a>(\n &'a self,\n field_path: &'_ FieldPath,\n ) -> Result<(\n AnalyzedLocalFieldReference,\n &'a EnrichedValueType,\n )> {\n let mut indices = Vec::with_capacity(field_path.len());\n let mut struct_schema = &self.data;\n\n let mut i = 0;\n let value_type = loop {\n let field_name = &field_path[i];\n let (field_idx, field) = struct_schema.find_field(field_name).ok_or_else(|| {\n api_error!(\"Field {} not found\", field_path[0..(i + 1)].join(\".\"))\n })?;\n indices.push(field_idx);\n if i + 1 >= field_path.len() {\n break &field.value_type;\n }\n i += 1;\n\n struct_schema = match &field.value_type.typ {\n ValueTypeBuilder::Struct(struct_type) => struct_type,\n _ => {\n api_bail!(\"Field {} is not a struct\", field_path[0..(i + 1)].join(\".\"));\n }\n };\n };\n Ok((\n AnalyzedLocalFieldReference {\n fields_idx: indices,\n },\n value_type,\n ))\n }\n}\n\npub(super) struct AnalyzerContext {\n pub lib_ctx: Arc,\n pub flow_ctx: Arc,\n}\n\n#[derive(Debug, Default)]\npub(super) struct OpScopeStates {\n pub op_output_types: HashMap,\n pub 
collectors: IndexMap,\n pub sub_scopes: HashMap>,\n}\n\nimpl OpScopeStates {\n pub fn add_collector(\n &mut self,\n collector_name: FieldName,\n schema: CollectorSchema,\n ) -> Result {\n let existing_len = self.collectors.len();\n let idx = match self.collectors.entry(collector_name) {\n indexmap::map::Entry::Occupied(mut entry) => {\n entry.get_mut().merge_schema(&schema)?;\n entry.index()\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(CollectorBuilder::new(Arc::new(schema)));\n existing_len\n }\n };\n Ok(AnalyzedLocalCollectorReference {\n collector_idx: idx as u32,\n })\n }\n\n pub fn consume_collector(\n &mut self,\n collector_name: &FieldName,\n ) -> Result<(AnalyzedLocalCollectorReference, Arc)> {\n let (collector_idx, _, collector) = self\n .collectors\n .get_full_mut(collector_name)\n .ok_or_else(|| api_error!(\"Collector not found: {}\", collector_name))?;\n Ok((\n AnalyzedLocalCollectorReference {\n collector_idx: collector_idx as u32,\n },\n collector.use_schema(),\n ))\n }\n\n fn build_op_scope_schema(&self) -> OpScopeSchema {\n OpScopeSchema {\n op_output_types: self\n .op_output_types\n .iter()\n .map(|(name, value_type)| (name.clone(), value_type.without_attrs()))\n .collect(),\n collectors: self\n .collectors\n .iter()\n .map(|(name, schema)| NamedSpec {\n name: name.clone(),\n spec: schema.schema.clone(),\n })\n .collect(),\n op_scopes: self.sub_scopes.clone(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct OpScope {\n pub name: String,\n pub parent: Option<(Arc, spec::FieldPath)>,\n pub(super) data: Arc>,\n pub(super) states: Mutex,\n}\n\nstruct Iter<'a>(Option<&'a OpScope>);\n\nimpl<'a> Iterator for Iter<'a> {\n type Item = &'a OpScope;\n\n fn next(&mut self) -> Option {\n match self.0 {\n Some(scope) => {\n self.0 = scope.parent.as_ref().map(|(parent, _)| parent.as_ref());\n Some(scope)\n }\n None => None,\n }\n }\n}\n\nimpl OpScope {\n pub(super) fn new(\n name: String,\n parent: Option<(Arc, spec::FieldPath)>,\n data: Arc>,\n ) -> 
Arc {\n Arc::new(Self {\n name,\n parent,\n data,\n states: Mutex::default(),\n })\n }\n\n fn add_op_output(\n &self,\n name: FieldName,\n value_type: EnrichedValueType,\n ) -> Result {\n let op_output = self\n .data\n .lock()\n .unwrap()\n .add_field(name.clone(), &value_type)?;\n self.states\n .lock()\n .unwrap()\n .op_output_types\n .insert(name, value_type);\n Ok(op_output)\n }\n\n pub fn ancestors(&self) -> impl Iterator {\n Iter(Some(self))\n }\n\n pub fn is_op_scope_descendant(&self, other: &Self) -> bool {\n if self == other {\n return true;\n }\n match &self.parent {\n Some((parent, _)) => parent.is_op_scope_descendant(other),\n None => false,\n }\n }\n\n pub(super) fn new_foreach_op_scope(\n self: &Arc,\n scope_name: String,\n field_path: &FieldPath,\n ) -> Result<(AnalyzedLocalFieldReference, Arc)> {\n let (local_field_ref, sub_data_scope) = {\n let data_scope = self.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(field_path)?;\n let sub_data_scope = match &value_type.typ {\n ValueTypeBuilder::Table(table_type) => table_type.sub_scope.clone(),\n _ => api_bail!(\"ForEach only works on collection, field {field_path} is not\"),\n };\n (local_field_ref, sub_data_scope)\n };\n let sub_op_scope = OpScope::new(\n scope_name,\n Some((self.clone(), field_path.clone())),\n sub_data_scope,\n );\n Ok((local_field_ref, sub_op_scope))\n }\n}\n\nimpl std::fmt::Display for OpScope {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n if let Some((scope, field_path)) = &self.parent {\n write!(f, \"{} [{} AS {}]\", scope, field_path, self.name)?;\n } else {\n write!(f, \"[{}]\", self.name)?;\n }\n Ok(())\n }\n}\n\nimpl PartialEq for OpScope {\n fn eq(&self, other: &Self) -> bool {\n std::ptr::eq(self, other)\n }\n}\nimpl Eq for OpScope {}\n\nfn find_scope<'a>(scope_name: &ScopeName, op_scope: &'a OpScope) -> Result<(u32, &'a OpScope)> {\n let (up_level, scope) = op_scope\n .ancestors()\n .enumerate()\n 
.find(|(_, s)| &s.name == scope_name)\n .ok_or_else(|| api_error!(\"Scope not found: {}\", scope_name))?;\n Ok((up_level as u32, scope))\n}\n\nfn analyze_struct_mapping(\n mapping: &StructMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedStructMapping, Vec)> {\n let mut field_mappings = Vec::with_capacity(mapping.fields.len());\n let mut field_schemas = Vec::with_capacity(mapping.fields.len());\n for field in mapping.fields.iter() {\n let (field_mapping, value_type) = analyze_value_mapping(&field.spec, op_scope)?;\n field_mappings.push(field_mapping);\n field_schemas.push(FieldSchema {\n name: field.name.clone(),\n value_type,\n });\n }\n Ok((\n AnalyzedStructMapping {\n fields: field_mappings,\n },\n field_schemas,\n ))\n}\n\nfn analyze_value_mapping(\n value_mapping: &ValueMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedValueMapping, EnrichedValueType)> {\n let result = match value_mapping {\n ValueMapping::Constant(v) => {\n let value = value::Value::from_json(v.value.clone(), &v.schema.typ)?;\n (AnalyzedValueMapping::Constant { value }, v.schema.clone())\n }\n\n ValueMapping::Field(v) => {\n let (scope_up_level, op_scope) = match &v.scope {\n Some(scope_name) => find_scope(scope_name, op_scope)?,\n None => (0, op_scope),\n };\n let data_scope = op_scope.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(&v.field_path)?;\n (\n AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: local_field_ref,\n scope_up_level,\n }),\n EnrichedValueType::from_alternative(value_type)?,\n )\n }\n\n ValueMapping::Struct(v) => {\n let (struct_mapping, field_schemas) = analyze_struct_mapping(v, op_scope)?;\n (\n AnalyzedValueMapping::Struct(struct_mapping),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(field_schemas),\n description: None,\n }),\n nullable: false,\n attrs: Default::default(),\n },\n )\n }\n };\n Ok(result)\n}\n\nfn analyze_input_fields(\n arg_bindings: &[OpArgBinding],\n 
op_scope: &OpScope,\n) -> Result> {\n let mut input_field_schemas = Vec::with_capacity(arg_bindings.len());\n for arg_binding in arg_bindings.iter() {\n let (analyzed_value, value_type) = analyze_value_mapping(&arg_binding.value, op_scope)?;\n input_field_schemas.push(OpArgSchema {\n name: arg_binding.arg_name.clone(),\n value_type,\n analyzed_value: analyzed_value.clone(),\n });\n }\n Ok(input_field_schemas)\n}\n\nfn add_collector(\n scope_name: &ScopeName,\n collector_name: FieldName,\n schema: CollectorSchema,\n op_scope: &OpScope,\n) -> Result {\n let (scope_up_level, scope) = find_scope(scope_name, op_scope)?;\n let local_ref = scope\n .states\n .lock()\n .unwrap()\n .add_collector(collector_name, schema)?;\n Ok(AnalyzedCollectorReference {\n local: local_ref,\n scope_up_level,\n })\n}\n\nstruct ExportDataFieldsInfo {\n local_collector_ref: AnalyzedLocalCollectorReference,\n primary_key_def: AnalyzedPrimaryKeyDef,\n primary_key_type: ValueType,\n value_fields_idx: Vec,\n value_stable: bool,\n}\n\nimpl AnalyzerContext {\n pub(super) async fn analyze_import_op(\n &self,\n op_scope: &Arc,\n import_op: NamedSpec,\n ) -> Result> + Send + use<>> {\n let source_factory = match get_executor_factory(&import_op.spec.source.kind)? 
{\n ExecutorFactory::Source(source_executor) => source_executor,\n _ => {\n return Err(anyhow::anyhow!(\n \"`{}` is not a source op\",\n import_op.spec.source.kind\n ));\n }\n };\n let (output_type, executor) = source_factory\n .build(\n serde_json::Value::Object(import_op.spec.source.spec),\n self.flow_ctx.clone(),\n )\n .await?;\n\n let op_name = import_op.name.clone();\n let primary_key_type = output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key: {op_name}\"))?\n .typ\n .clone();\n let output = op_scope.add_op_output(import_op.name, output_type)?;\n\n let concur_control_options = import_op\n .spec\n .execution_options\n .get_concur_control_options();\n let global_concurrency_controller = self.lib_ctx.global_concurrency_controller.clone();\n let result_fut = async move {\n trace!(\"Start building executor for source op `{op_name}`\");\n let executor = executor.await?;\n trace!(\"Finished building executor for source op `{op_name}`\");\n Ok(AnalyzedImportOp {\n executor,\n output,\n primary_key_type,\n name: op_name,\n refresh_options: import_op.spec.refresh_options,\n concurrency_controller: concur_control::CombinedConcurrencyController::new(\n &concur_control_options,\n global_concurrency_controller,\n ),\n })\n };\n Ok(result_fut)\n }\n\n pub(super) async fn analyze_reactive_op(\n &self,\n op_scope: &Arc,\n reactive_op: &NamedSpec,\n ) -> Result>> {\n let result_fut = match &reactive_op.spec {\n ReactiveOpSpec::Transform(op) => {\n let input_field_schemas =\n analyze_input_fields(&op.inputs, op_scope).with_context(|| {\n format!(\n \"Failed to analyze inputs for transform op: {}\",\n reactive_op.name\n )\n })?;\n let spec = serde_json::Value::Object(op.op.spec.clone());\n\n match get_executor_factory(&op.op.kind)? 
{\n ExecutorFactory::SimpleFunction(fn_executor) => {\n let input_value_mappings = input_field_schemas\n .iter()\n .map(|field| field.analyzed_value.clone())\n .collect();\n let (output_enriched_type, executor) = fn_executor\n .build(spec, input_field_schemas, self.flow_ctx.clone())\n .await?;\n let logic_fingerprinter = Fingerprinter::default()\n .with(&op.op)?\n .with(&output_enriched_type.without_attrs())?;\n let output_type = output_enriched_type.typ.clone();\n let output = op_scope\n .add_op_output(reactive_op.name.clone(), output_enriched_type)?;\n let op_name = reactive_op.name.clone();\n async move {\n trace!(\"Start building executor for transform op `{op_name}`\");\n let executor = executor.await.with_context(|| {\n format!(\"Failed to build executor for transform op: {op_name}\")\n })?;\n let enable_cache = executor.enable_cache();\n let behavior_version = executor.behavior_version();\n trace!(\"Finished building executor for transform op `{op_name}`, enable cache: {enable_cache}, behavior version: {behavior_version:?}\");\n let function_exec_info = AnalyzedFunctionExecInfo {\n enable_cache,\n behavior_version,\n fingerprinter: logic_fingerprinter\n .with(&behavior_version)?,\n output_type\n };\n if function_exec_info.enable_cache\n && function_exec_info.behavior_version.is_none()\n {\n api_bail!(\n \"When caching is enabled, behavior version must be specified for transform op: {op_name}\"\n );\n }\n Ok(AnalyzedReactiveOp::Transform(AnalyzedTransformOp {\n name: op_name,\n inputs: input_value_mappings,\n function_exec_info,\n executor,\n output,\n }))\n }\n .boxed()\n }\n _ => api_bail!(\"`{}` is not a function op\", op.op.kind),\n }\n }\n\n ReactiveOpSpec::ForEach(foreach_op) => {\n let (local_field_ref, sub_op_scope) = op_scope.new_foreach_op_scope(\n foreach_op.op_scope.name.clone(),\n &foreach_op.field_path,\n )?;\n let analyzed_op_scope_fut = {\n let analyzed_op_scope_fut = self\n .analyze_op_scope(&sub_op_scope, &foreach_op.op_scope.ops)\n 
.boxed_local()\n .await?;\n let sub_op_scope_schema =\n sub_op_scope.states.lock().unwrap().build_op_scope_schema();\n op_scope.states.lock().unwrap().sub_scopes.insert(\n foreach_op.op_scope.name.clone(),\n Arc::new(sub_op_scope_schema),\n );\n analyzed_op_scope_fut\n };\n let op_name = reactive_op.name.clone();\n\n let concur_control_options =\n foreach_op.execution_options.get_concur_control_options();\n async move {\n Ok(AnalyzedReactiveOp::ForEach(AnalyzedForEachOp {\n local_field_ref,\n op_scope: analyzed_op_scope_fut\n .await\n .with_context(|| format!(\"Analyzing foreach op: {op_name}\"))?,\n name: op_name,\n concurrency_controller: concur_control::ConcurrencyController::new(\n &concur_control_options,\n ),\n }))\n }\n .boxed()\n }\n\n ReactiveOpSpec::Collect(op) => {\n let (struct_mapping, fields_schema) = analyze_struct_mapping(&op.input, op_scope)?;\n let has_auto_uuid_field = op.auto_uuid_field.is_some();\n let fingerprinter = Fingerprinter::default().with(&fields_schema)?;\n let collect_op = AnalyzedReactiveOp::Collect(AnalyzedCollectOp {\n name: reactive_op.name.clone(),\n has_auto_uuid_field,\n input: struct_mapping,\n collector_ref: add_collector(\n &op.scope_name,\n op.collector_name.clone(),\n CollectorSchema::from_fields(fields_schema, op.auto_uuid_field.clone()),\n op_scope,\n )?,\n fingerprinter,\n });\n async move { Ok(collect_op) }.boxed()\n }\n };\n Ok(result_fut)\n }\n\n #[allow(clippy::too_many_arguments)]\n async fn analyze_export_op_group(\n &self,\n target_kind: &str,\n op_scope: &Arc,\n flow_inst: &FlowInstanceSpec,\n export_op_group: &AnalyzedExportTargetOpGroup,\n declarations: Vec,\n targets_analyzed_ss: &mut [Option],\n declarations_analyzed_ss: &mut Vec,\n ) -> Result> + Send + use<>>> {\n let mut collection_specs = Vec::::new();\n let mut data_fields_infos = Vec::::new();\n for idx in export_op_group.op_idx.iter() {\n let export_op = &flow_inst.export_ops[*idx];\n let (local_collector_ref, collector_schema) = op_scope\n .states\n 
.lock()\n .unwrap()\n .consume_collector(&export_op.spec.collector_name)?;\n let (key_fields_schema, value_fields_schema, data_collection_info) =\n match &export_op.spec.index_options.primary_key_fields {\n Some(fields) => {\n let pk_fields_idx = fields\n .iter()\n .map(|f| {\n collector_schema\n .fields\n .iter()\n .position(|field| &field.name == f)\n .ok_or_else(|| anyhow!(\"field not found: {}\", f))\n })\n .collect::>>()?;\n\n let key_fields_schema = pk_fields_idx\n .iter()\n .map(|idx| collector_schema.fields[*idx].clone())\n .collect::>();\n let primary_key_type = if pk_fields_idx.len() == 1 {\n key_fields_schema[0].value_type.typ.clone()\n } else {\n ValueType::Struct(StructSchema {\n fields: Arc::from(key_fields_schema.clone()),\n description: None,\n })\n };\n let mut value_fields_schema: Vec = vec![];\n let mut value_fields_idx = vec![];\n for (idx, field) in collector_schema.fields.iter().enumerate() {\n if !pk_fields_idx.contains(&idx) {\n value_fields_schema.push(field.clone());\n value_fields_idx.push(idx as u32);\n }\n }\n let value_stable = collector_schema\n .auto_uuid_field_idx\n .as_ref()\n .map(|uuid_idx| pk_fields_idx.contains(uuid_idx))\n .unwrap_or(false);\n (\n key_fields_schema,\n value_fields_schema,\n ExportDataFieldsInfo {\n local_collector_ref,\n primary_key_def: AnalyzedPrimaryKeyDef::Fields(pk_fields_idx),\n primary_key_type,\n value_fields_idx,\n value_stable,\n },\n )\n }\n None => {\n // TODO: Support auto-generate primary key\n api_bail!(\"Primary key fields must be specified\")\n }\n };\n collection_specs.push(interface::ExportDataCollectionSpec {\n name: export_op.name.clone(),\n spec: serde_json::Value::Object(export_op.spec.target.spec.clone()),\n key_fields_schema,\n value_fields_schema,\n index_options: export_op.spec.index_options.clone(),\n });\n data_fields_infos.push(data_collection_info);\n }\n let (data_collections_output, declarations_output) = export_op_group\n .target_factory\n .clone()\n .build(collection_specs, 
declarations, self.flow_ctx.clone())\n .await?;\n let analyzed_export_ops = export_op_group\n .op_idx\n .iter()\n .zip(data_collections_output.into_iter())\n .zip(data_fields_infos.into_iter())\n .map(|((idx, data_coll_output), data_fields_info)| {\n let export_op = &flow_inst.export_ops[*idx];\n let op_name = export_op.name.clone();\n let export_target_factory = export_op_group.target_factory.clone();\n\n let export_op_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key: data_coll_output.setup_key,\n desired_setup_state: data_coll_output.desired_setup_state,\n setup_by_user: export_op.spec.setup_by_user,\n };\n targets_analyzed_ss[*idx] = Some(export_op_ss);\n\n Ok(async move {\n trace!(\"Start building executor for export op `{op_name}`\");\n let export_context = data_coll_output\n .export_context\n .await\n .with_context(|| format!(\"Analyzing export op: {op_name}\"))?;\n trace!(\"Finished building executor for export op `{op_name}`\");\n Ok(AnalyzedExportOp {\n name: op_name,\n input: data_fields_info.local_collector_ref,\n export_target_factory,\n export_context,\n primary_key_def: data_fields_info.primary_key_def,\n primary_key_type: data_fields_info.primary_key_type,\n value_fields: data_fields_info.value_fields_idx,\n value_stable: data_fields_info.value_stable,\n })\n })\n })\n .collect::>>()?;\n for (setup_key, desired_setup_state) in declarations_output {\n let decl_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key,\n desired_setup_state,\n setup_by_user: false,\n };\n declarations_analyzed_ss.push(decl_ss);\n }\n Ok(analyzed_export_ops)\n }\n\n async fn analyze_op_scope(\n &self,\n op_scope: &Arc,\n reactive_ops: &[NamedSpec],\n ) -> Result> + Send + use<>> {\n let mut op_futs = Vec::with_capacity(reactive_ops.len());\n for reactive_op in reactive_ops.iter() {\n op_futs.push(self.analyze_reactive_op(op_scope, reactive_op).await?);\n }\n let collector_len = 
op_scope.states.lock().unwrap().collectors.len();\n let result_fut = async move {\n Ok(AnalyzedOpScope {\n reactive_ops: try_join_all(op_futs).await?,\n collector_len,\n })\n };\n Ok(result_fut)\n }\n}\n\npub fn build_flow_instance_context(\n flow_inst_name: &str,\n py_exec_ctx: Option,\n) -> Arc {\n Arc::new(FlowInstanceContext {\n flow_instance_name: flow_inst_name.to_string(),\n auth_registry: get_auth_registry().clone(),\n py_exec_ctx: py_exec_ctx.map(Arc::new),\n })\n}\n\nfn build_flow_schema(root_op_scope: &OpScope) -> Result {\n let schema = (&root_op_scope.data.lock().unwrap().data).try_into()?;\n let root_op_scope_schema = root_op_scope.states.lock().unwrap().build_op_scope_schema();\n Ok(FlowSchema {\n schema,\n root_op_scope: root_op_scope_schema,\n })\n}\n\npub async fn analyze_flow(\n flow_inst: &FlowInstanceSpec,\n flow_ctx: Arc,\n) -> Result<(\n FlowSchema,\n AnalyzedSetupState,\n impl Future> + Send + use<>,\n)> {\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let root_data_scope = Arc::new(Mutex::new(DataScopeBuilder::new()));\n let root_op_scope = OpScope::new(ROOT_SCOPE_NAME.to_string(), None, root_data_scope);\n let mut import_ops_futs = Vec::with_capacity(flow_inst.import_ops.len());\n for import_op in flow_inst.import_ops.iter() {\n import_ops_futs.push(\n analyzer_ctx\n .analyze_import_op(&root_op_scope, import_op.clone())\n .await?,\n );\n }\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n\n #[derive(Default)]\n struct TargetOpGroup {\n export_op_ids: Vec,\n declarations: Vec,\n }\n let mut target_op_group = IndexMap::::new();\n for (idx, export_op) in flow_inst.export_ops.iter().enumerate() {\n target_op_group\n .entry(export_op.spec.target.kind.clone())\n .or_default()\n .export_op_ids\n .push(idx);\n }\n for declaration in flow_inst.declarations.iter() {\n target_op_group\n .entry(declaration.kind.clone())\n .or_default()\n .declarations\n 
.push(serde_json::Value::Object(declaration.spec.clone()));\n }\n\n let mut export_ops_futs = vec![];\n let mut analyzed_target_op_groups = vec![];\n\n let mut targets_analyzed_ss = Vec::with_capacity(flow_inst.export_ops.len());\n targets_analyzed_ss.resize_with(flow_inst.export_ops.len(), || None);\n\n let mut declarations_analyzed_ss = Vec::with_capacity(flow_inst.declarations.len());\n\n for (target_kind, op_ids) in target_op_group.into_iter() {\n let target_factory = match get_executor_factory(&target_kind)? {\n ExecutorFactory::ExportTarget(export_executor) => export_executor,\n _ => api_bail!(\"`{}` is not a export target op\", target_kind),\n };\n let analyzed_target_op_group = AnalyzedExportTargetOpGroup {\n target_factory,\n op_idx: op_ids.export_op_ids,\n };\n export_ops_futs.extend(\n analyzer_ctx\n .analyze_export_op_group(\n target_kind.as_str(),\n &root_op_scope,\n flow_inst,\n &analyzed_target_op_group,\n op_ids.declarations,\n &mut targets_analyzed_ss,\n &mut declarations_analyzed_ss,\n )\n .await?,\n );\n analyzed_target_op_groups.push(analyzed_target_op_group);\n }\n\n let flow_schema = build_flow_schema(&root_op_scope)?;\n let analyzed_ss = exec_ctx::AnalyzedSetupState {\n targets: targets_analyzed_ss\n .into_iter()\n .enumerate()\n .map(|(idx, v)| v.ok_or_else(|| anyhow!(\"target op `{}` not found\", idx)))\n .collect::>>()?,\n declarations: declarations_analyzed_ss,\n };\n\n let logic_fingerprint = Fingerprinter::default()\n .with(&flow_inst)?\n .with(&flow_schema.schema)?\n .into_fingerprint();\n let plan_fut = async move {\n let (import_ops, op_scope, export_ops) = try_join3(\n try_join_all(import_ops_futs),\n op_scope_fut,\n try_join_all(export_ops_futs),\n )\n .await?;\n\n Ok(ExecutionPlan {\n logic_fingerprint,\n import_ops,\n op_scope,\n export_ops,\n export_op_groups: analyzed_target_op_groups,\n })\n };\n\n Ok((flow_schema, analyzed_ss, plan_fut))\n}\n\npub async fn analyze_transient_flow<'a>(\n flow_inst: &TransientFlowSpec,\n 
flow_ctx: Arc,\n) -> Result<(\n EnrichedValueType,\n FlowSchema,\n impl Future> + Send + 'a,\n)> {\n let mut root_data_scope = DataScopeBuilder::new();\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let mut input_fields = vec![];\n for field in flow_inst.input_fields.iter() {\n let analyzed_field = root_data_scope.add_field(field.name.clone(), &field.value_type)?;\n input_fields.push(analyzed_field);\n }\n let root_op_scope = OpScope::new(\n ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(root_data_scope)),\n );\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n let (output_value, output_type) =\n analyze_value_mapping(&flow_inst.output_value, &root_op_scope)?;\n let data_schema = build_flow_schema(&root_op_scope)?;\n let plan_fut = async move {\n let op_scope = op_scope_fut.await?;\n Ok(TransientExecutionPlan {\n input_fields,\n op_scope,\n output_value,\n })\n };\n Ok((output_type, data_schema, plan_fut))\n}\n"], ["/cocoindex/src/lib_context.rs", "use crate::prelude::*;\n\nuse crate::builder::AnalyzedFlow;\nuse crate::execution::source_indexer::SourceIndexingContext;\nuse crate::service::error::ApiError;\nuse crate::settings;\nuse crate::setup::ObjectSetupStatus;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\nuse sqlx::postgres::PgConnectOptions;\nuse tokio::runtime::Runtime;\n\npub struct FlowExecutionContext {\n pub setup_execution_context: Arc,\n pub setup_status: setup::FlowSetupStatus,\n source_indexing_contexts: Vec>>,\n}\n\nasync fn build_setup_context(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result<(\n Arc,\n setup::FlowSetupStatus,\n)> {\n let setup_execution_context = Arc::new(exec_ctx::build_flow_setup_execution_context(\n &analyzed_flow.flow_instance,\n &analyzed_flow.data_schema,\n &analyzed_flow.setup_state,\n existing_flow_ss,\n )?);\n\n let setup_status = setup::check_flow_setup_status(\n 
Some(&setup_execution_context.setup_state),\n existing_flow_ss,\n &analyzed_flow.flow_instance_ctx,\n )\n .await?;\n\n Ok((setup_execution_context, setup_status))\n}\n\nimpl FlowExecutionContext {\n async fn new(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n let mut source_indexing_contexts = Vec::new();\n source_indexing_contexts.resize_with(analyzed_flow.flow_instance.import_ops.len(), || {\n tokio::sync::OnceCell::new()\n });\n\n Ok(Self {\n setup_execution_context,\n setup_status,\n source_indexing_contexts,\n })\n }\n\n pub async fn update_setup_state(\n &mut self,\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result<()> {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n self.setup_execution_context = setup_execution_context;\n self.setup_status = setup_status;\n Ok(())\n }\n\n pub async fn get_source_indexing_context(\n &self,\n flow: &Arc,\n source_idx: usize,\n pool: &PgPool,\n ) -> Result<&Arc> {\n self.source_indexing_contexts[source_idx]\n .get_or_try_init(|| async move {\n anyhow::Ok(Arc::new(\n SourceIndexingContext::load(\n flow.clone(),\n source_idx,\n self.setup_execution_context.clone(),\n pool,\n )\n .await?,\n ))\n })\n .await\n }\n}\n\npub struct FlowContext {\n pub flow: Arc,\n execution_ctx: Arc>,\n}\n\nimpl FlowContext {\n pub fn flow_name(&self) -> &str {\n &self.flow.flow_instance.name\n }\n\n pub async fn new(\n flow: Arc,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let execution_ctx = Arc::new(tokio::sync::RwLock::new(\n FlowExecutionContext::new(&flow, existing_flow_ss).await?,\n ));\n Ok(Self {\n flow,\n execution_ctx,\n })\n }\n\n pub async fn use_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = 
self.execution_ctx.read().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub async fn use_owned_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.clone().read_owned().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub fn get_execution_ctx_for_setup(&self) -> &tokio::sync::RwLock {\n &self.execution_ctx\n }\n}\n\nstatic TOKIO_RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap());\nstatic AUTH_REGISTRY: LazyLock> = LazyLock::new(|| Arc::new(AuthRegistry::new()));\n\ntype PoolKey = (String, Option);\ntype PoolValue = Arc>;\n\n#[derive(Default)]\npub struct DbPools {\n pub pools: Mutex>,\n}\n\nimpl DbPools {\n pub async fn get_pool(&self, conn_spec: &settings::DatabaseConnectionSpec) -> Result {\n let db_pool_cell = {\n let key = (conn_spec.url.clone(), conn_spec.user.clone());\n let mut db_pools = self.pools.lock().unwrap();\n db_pools.entry(key).or_default().clone()\n };\n let pool = db_pool_cell\n .get_or_try_init(|| async move {\n let mut pg_options: PgConnectOptions = conn_spec.url.parse()?;\n if let Some(user) = &conn_spec.user {\n pg_options = pg_options.username(user);\n }\n if let Some(password) = &conn_spec.password {\n pg_options = pg_options.password(password);\n }\n let pool = PgPool::connect_with(pg_options)\n .await\n .context(\"Failed to connect to database\")?;\n anyhow::Ok(pool)\n })\n .await?;\n Ok(pool.clone())\n }\n}\n\npub struct LibSetupContext {\n pub all_setup_states: setup::AllSetupStates,\n pub global_setup_status: setup::GlobalSetupStatus,\n}\npub struct PersistenceContext {\n pub builtin_db_pool: PgPool,\n pub setup_ctx: tokio::sync::RwLock,\n}\n\npub 
struct LibContext {\n pub db_pools: DbPools,\n pub persistence_ctx: Option,\n pub flows: Mutex>>,\n\n pub global_concurrency_controller: Arc,\n}\n\nimpl LibContext {\n pub fn get_flow_context(&self, flow_name: &str) -> Result> {\n let flows = self.flows.lock().unwrap();\n let flow_ctx = flows\n .get(flow_name)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"Flow instance not found: {flow_name}\"),\n StatusCode::NOT_FOUND,\n )\n })?\n .clone();\n Ok(flow_ctx)\n }\n\n pub fn remove_flow_context(&self, flow_name: &str) {\n let mut flows = self.flows.lock().unwrap();\n flows.remove(flow_name);\n }\n\n pub fn require_persistence_ctx(&self) -> Result<&PersistenceContext> {\n self.persistence_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Database is required for this operation. Please set COCOINDEX_DATABASE_URL environment variable and call cocoindex.init() with database settings.\"))\n }\n\n pub fn require_builtin_db_pool(&self) -> Result<&PgPool> {\n Ok(&self.require_persistence_ctx()?.builtin_db_pool)\n }\n}\n\npub fn get_runtime() -> &'static Runtime {\n &TOKIO_RUNTIME\n}\n\npub fn get_auth_registry() -> &'static Arc {\n &AUTH_REGISTRY\n}\n\nstatic LIB_INIT: OnceLock<()> = OnceLock::new();\npub fn create_lib_context(settings: settings::Settings) -> Result {\n LIB_INIT.get_or_init(|| {\n let _ = env_logger::try_init();\n\n pyo3_async_runtimes::tokio::init_with_runtime(get_runtime()).unwrap();\n\n let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();\n });\n\n let db_pools = DbPools::default();\n let persistence_ctx = if let Some(database_spec) = &settings.database {\n let (pool, all_setup_states) = get_runtime().block_on(async {\n let pool = db_pools.get_pool(database_spec).await?;\n let existing_ss = setup::get_existing_setup_state(&pool).await?;\n anyhow::Ok((pool, existing_ss))\n })?;\n Some(PersistenceContext {\n builtin_db_pool: pool,\n setup_ctx: tokio::sync::RwLock::new(LibSetupContext {\n global_setup_status: 
setup::GlobalSetupStatus::from_setup_states(&all_setup_states),\n all_setup_states,\n }),\n })\n } else {\n // No database configured\n None\n };\n\n Ok(LibContext {\n db_pools,\n persistence_ctx,\n flows: Mutex::new(BTreeMap::new()),\n global_concurrency_controller: Arc::new(concur_control::ConcurrencyController::new(\n &concur_control::Options {\n max_inflight_rows: settings.global_execution_options.source_max_inflight_rows,\n max_inflight_bytes: settings.global_execution_options.source_max_inflight_bytes,\n },\n )),\n })\n}\n\npub static LIB_CONTEXT: RwLock>> = RwLock::new(None);\n\npub(crate) fn init_lib_context(settings: settings::Settings) -> Result<()> {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = Some(Arc::new(create_lib_context(settings)?));\n Ok(())\n}\n\npub(crate) fn get_lib_context() -> Result> {\n let lib_context_locked = LIB_CONTEXT.read().unwrap();\n lib_context_locked\n .as_ref()\n .cloned()\n .ok_or_else(|| anyhow!(\"CocoIndex library is not initialized or already stopped\"))\n}\n\npub(crate) fn clear_lib_context() {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = None;\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_db_pools_default() {\n let db_pools = DbPools::default();\n assert!(db_pools.pools.lock().unwrap().is_empty());\n }\n\n #[test]\n fn test_lib_context_without_database() {\n let lib_context = create_lib_context(settings::Settings::default()).unwrap();\n assert!(lib_context.persistence_ctx.is_none());\n assert!(lib_context.require_builtin_db_pool().is_err());\n }\n\n #[test]\n fn test_persistence_context_type_safety() {\n // This test ensures that PersistenceContext groups related fields together\n let settings = settings::Settings {\n database: Some(settings::DatabaseConnectionSpec {\n url: \"postgresql://test\".to_string(),\n user: None,\n password: None,\n }),\n ..Default::default()\n };\n\n // This would fail at runtime due to invalid 
connection, but we're testing the structure\n let result = create_lib_context(settings);\n // We expect this to fail due to invalid connection, but the structure should be correct\n assert!(result.is_err());\n }\n}\n"], ["/cocoindex/src/execution/evaluator.rs", "use crate::prelude::*;\n\nuse anyhow::{Context, Ok};\nuse futures::future::try_join_all;\n\nuse crate::base::value::EstimatedByteSize;\nuse crate::builder::{AnalyzedTransientFlow, plan::*};\nuse crate::py::IntoPyResult;\nuse crate::{\n base::{schema, value},\n utils::immutable::RefList,\n};\n\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, evaluate_with_cell};\n\n#[derive(Debug)]\npub struct ScopeValueBuilder {\n // TODO: Share the same lock for values produced in the same execution scope, for stricter atomicity.\n pub fields: Vec>>,\n}\n\nimpl value::EstimatedByteSize for ScopeValueBuilder {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }\n}\n\nimpl From<&ScopeValueBuilder> for value::ScopeValue {\n fn from(val: &ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .iter()\n .map(|f| value::Value::from_alternative_ref(f.get().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl From for value::ScopeValue {\n fn from(val: ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .into_iter()\n .map(|f| value::Value::from_alternative(f.into_inner().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl ScopeValueBuilder {\n fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize_with(num_fields, OnceLock::new);\n Self { fields }\n }\n\n fn augmented_from(source: &value::ScopeValue, schema: &schema::TableSchema) -> Result {\n let val_index_base = if schema.has_key() { 1 } else { 0 };\n let len = schema.row.fields.len() - val_index_base;\n\n let mut builder = Self::new(len);\n\n let 
value::ScopeValue(source_fields) = source;\n for ((v, t), r) in source_fields\n .fields\n .iter()\n .zip(schema.row.fields[val_index_base..(val_index_base + len)].iter())\n .zip(&mut builder.fields)\n {\n r.set(augmented_value(v, &t.value_type.typ)?)\n .into_py_result()?;\n }\n Ok(builder)\n }\n}\n\nfn augmented_value(\n val: &value::Value,\n val_type: &schema::ValueType,\n) -> Result> {\n let value = match (val, val_type) {\n (value::Value::Null, _) => value::Value::Null,\n (value::Value::Basic(v), _) => value::Value::Basic(v.clone()),\n (value::Value::Struct(v), schema::ValueType::Struct(t)) => {\n value::Value::Struct(value::FieldValues {\n fields: v\n .fields\n .iter()\n .enumerate()\n .map(|(i, v)| augmented_value(v, &t.fields[i].value_type.typ))\n .collect::>>()?,\n })\n }\n (value::Value::UTable(v), schema::ValueType::Table(t)) => value::Value::UTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (value::Value::KTable(v), schema::ValueType::Table(t)) => value::Value::KTable(\n v.iter()\n .map(|(k, v)| Ok((k.clone(), ScopeValueBuilder::augmented_from(v, t)?)))\n .collect::>>()?,\n ),\n (value::Value::LTable(v), schema::ValueType::Table(t)) => value::Value::LTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (val, _) => bail!(\"Value kind doesn't match the type {val_type}: {val:?}\"),\n };\n Ok(value)\n}\n\nenum ScopeKey<'a> {\n /// For root struct and UTable.\n None,\n /// For KTable row.\n MapKey(&'a value::KeyValue),\n /// For LTable row.\n ListIndex(usize),\n}\n\nimpl<'a> ScopeKey<'a> {\n pub fn key(&self) -> Option> {\n match self {\n ScopeKey::None => None,\n ScopeKey::MapKey(k) => Some(Cow::Borrowed(k)),\n ScopeKey::ListIndex(i) => Some(Cow::Owned(value::KeyValue::Int64(*i as i64))),\n }\n }\n\n pub fn value_field_index_base(&self) -> u32 {\n match *self {\n ScopeKey::None => 0,\n ScopeKey::MapKey(_) => 1,\n ScopeKey::ListIndex(_) => 0,\n }\n }\n}\n\nimpl 
std::fmt::Display for ScopeKey<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ScopeKey::None => write!(f, \"()\"),\n ScopeKey::MapKey(k) => write!(f, \"{{{k}}}\"),\n ScopeKey::ListIndex(i) => write!(f, \"[{i}]\"),\n }\n }\n}\n\nstruct ScopeEntry<'a> {\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n collected_values: Vec>>,\n}\n\nimpl<'a> ScopeEntry<'a> {\n fn new(\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n analyzed_op_scope: &AnalyzedOpScope,\n ) -> Self {\n let mut collected_values = Vec::with_capacity(analyzed_op_scope.collector_len);\n collected_values.resize_with(analyzed_op_scope.collector_len, Default::default);\n\n Self {\n key,\n value,\n schema,\n collected_values,\n }\n }\n\n fn get_local_field_schema<'b>(\n schema: &'b schema::StructSchema,\n indices: &[u32],\n ) -> Result<&'b schema::FieldSchema> {\n let field_idx = indices[0] as usize;\n let field_schema = &schema.fields[field_idx];\n let result = if indices.len() == 1 {\n field_schema\n } else {\n let struct_field_schema = match &field_schema.value_type.typ {\n schema::ValueType::Struct(s) => s,\n _ => bail!(\"Expect struct field\"),\n };\n Self::get_local_field_schema(struct_field_schema, &indices[1..])?\n };\n Ok(result)\n }\n\n fn get_local_key_field<'b>(\n key_val: &'b value::KeyValue,\n indices: &'_ [u32],\n ) -> &'b value::KeyValue {\n if indices.is_empty() {\n key_val\n } else if let value::KeyValue::Struct(fields) = key_val {\n Self::get_local_key_field(&fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_local_field<'b>(\n val: &'b value::Value,\n indices: &'_ [u32],\n ) -> &'b value::Value {\n if indices.is_empty() {\n val\n } else if let value::Value::Struct(fields) = val {\n Self::get_local_field(&fields.fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only 
struct can be accessed by sub field\");\n }\n }\n\n fn get_value_field_builder(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> &value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n Self::get_local_field(val, &field_ref.fields_idx[1..])\n }\n\n fn get_field(&self, field_ref: &AnalyzedLocalFieldReference) -> value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n if first_index < index_base {\n let key_val = self.key.key().unwrap().into_owned();\n let key_part = Self::get_local_key_field(&key_val, &field_ref.fields_idx[1..]);\n key_part.clone().into()\n } else {\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n let val_part = Self::get_local_field(val, &field_ref.fields_idx[1..]);\n value::Value::from_alternative_ref(val_part)\n }\n }\n\n fn get_field_schema(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> Result<&schema::FieldSchema> {\n Ok(Self::get_local_field_schema(\n self.schema,\n &field_ref.fields_idx,\n )?)\n }\n\n fn define_field_w_builder(\n &self,\n output_field: &AnalyzedOpOutput,\n val: value::Value,\n ) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let index_base = self.key.value_field_index_base() as usize;\n self.value.fields[field_index - index_base].set(val).map_err(|_| {\n anyhow!(\"Field {field_index} for scope is already set, violating single-definition rule.\")\n })?;\n Ok(())\n }\n\n fn define_field(&self, output_field: &AnalyzedOpOutput, val: &value::Value) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let field_schema = &self.schema.fields[field_index];\n let val = augmented_value(val, &field_schema.value_type.typ)?;\n self.define_field_w_builder(output_field, val)?;\n Ok(())\n }\n}\n\nfn assemble_value(\n value_mapping: 
&AnalyzedValueMapping,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n) -> value::Value {\n match value_mapping {\n AnalyzedValueMapping::Constant { value } => value.clone(),\n AnalyzedValueMapping::Field(field_ref) => scoped_entries\n .headn(field_ref.scope_up_level as usize)\n .unwrap()\n .get_field(&field_ref.local),\n AnalyzedValueMapping::Struct(mapping) => {\n let fields = mapping\n .fields\n .iter()\n .map(|f| assemble_value(f, scoped_entries))\n .collect();\n value::Value::Struct(value::FieldValues { fields })\n }\n }\n}\n\nfn assemble_input_values<'a>(\n value_mappings: &'a [AnalyzedValueMapping],\n scoped_entries: RefList<'a, &ScopeEntry<'a>>,\n) -> impl Iterator + 'a {\n value_mappings\n .iter()\n .map(move |value_mapping| assemble_value(value_mapping, scoped_entries))\n}\n\nasync fn evaluate_child_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n child_scope_entry: ScopeEntry<'_>,\n concurrency_controller: &concur_control::ConcurrencyController,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let _permit = concurrency_controller\n .acquire(Some(|| {\n child_scope_entry\n .value\n .fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }))\n .await?;\n evaluate_op_scope(op_scope, scoped_entries.prepend(&child_scope_entry), memory)\n .await\n .with_context(|| {\n format!(\n \"Evaluating in scope with key {}\",\n match child_scope_entry.key.key() {\n Some(k) => k.to_string(),\n None => \"()\".to_string(),\n }\n )\n })\n}\n\nasync fn evaluate_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let head_scope = *scoped_entries.head().unwrap();\n for reactive_op in op_scope.reactive_ops.iter() {\n match reactive_op {\n AnalyzedReactiveOp::Transform(op) => {\n let mut input_values = Vec::with_capacity(op.inputs.len());\n input_values\n .extend(assemble_input_values(&op.inputs, 
scoped_entries).collect::>());\n let output_value_cell = memory.get_cache_entry(\n || {\n Ok(op\n .function_exec_info\n .fingerprinter\n .clone()\n .with(&input_values)?\n .into_fingerprint())\n },\n &op.function_exec_info.output_type,\n /*ttl=*/ None,\n )?;\n let output_value = evaluate_with_cell(output_value_cell.as_ref(), move || {\n op.executor.evaluate(input_values)\n })\n .await\n .with_context(|| format!(\"Evaluating Transform op `{}`\", op.name,))?;\n head_scope.define_field(&op.output, &output_value)?;\n }\n\n AnalyzedReactiveOp::ForEach(op) => {\n let target_field_schema = head_scope.get_field_schema(&op.local_field_ref)?;\n let table_schema = match &target_field_schema.value_type.typ {\n schema::ValueType::Table(cs) => cs,\n _ => bail!(\"Expect target field to be a table\"),\n };\n\n let target_field = head_scope.get_value_field_builder(&op.local_field_ref);\n let task_futs = match target_field {\n value::Value::UTable(v) => v\n .iter()\n .map(|item| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::None,\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::KTable(v) => v\n .iter()\n .map(|(k, v)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::MapKey(k),\n v,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::LTable(v) => v\n .iter()\n .enumerate()\n .map(|(i, item)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::ListIndex(i),\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n _ => {\n bail!(\"Target field type is expected to be a table\");\n }\n };\n try_join_all(task_futs)\n .await\n .with_context(|| format!(\"Evaluating ForEach op `{}`\", op.name,))?;\n }\n\n AnalyzedReactiveOp::Collect(op) => {\n let 
mut field_values = Vec::with_capacity(\n op.input.fields.len() + if op.has_auto_uuid_field { 1 } else { 0 },\n );\n let field_values_iter = assemble_input_values(&op.input.fields, scoped_entries);\n if op.has_auto_uuid_field {\n field_values.push(value::Value::Null);\n field_values.extend(field_values_iter);\n let uuid = memory.next_uuid(\n op.fingerprinter\n .clone()\n .with(&field_values[1..])?\n .into_fingerprint(),\n )?;\n field_values[0] = value::Value::Basic(value::BasicValue::Uuid(uuid));\n } else {\n field_values.extend(field_values_iter);\n };\n let collector_entry = scoped_entries\n .headn(op.collector_ref.scope_up_level as usize)\n .ok_or_else(|| anyhow::anyhow!(\"Collector level out of bound\"))?;\n {\n let mut collected_records = collector_entry.collected_values\n [op.collector_ref.local.collector_idx as usize]\n .lock()\n .unwrap();\n collected_records.push(value::FieldValues {\n fields: field_values,\n });\n }\n }\n }\n }\n Ok(())\n}\n\npub struct SourceRowEvaluationContext<'a> {\n pub plan: &'a ExecutionPlan,\n pub import_op: &'a AnalyzedImportOp,\n pub schema: &'a schema::FlowSchema,\n pub key: &'a value::KeyValue,\n pub import_op_idx: usize,\n}\n\n#[derive(Debug)]\npub struct EvaluateSourceEntryOutput {\n pub data_scope: ScopeValueBuilder,\n pub collected_values: Vec>,\n}\n\npub async fn evaluate_source_entry(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_value: value::FieldValues,\n memory: &EvaluationMemory,\n) -> Result {\n let _permit = src_eval_ctx\n .import_op\n .concurrency_controller\n .acquire_bytes_with_reservation(|| source_value.estimated_byte_size())\n .await?;\n let root_schema = &src_eval_ctx.schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &src_eval_ctx.plan.op_scope,\n );\n\n let table_schema = match &root_schema.fields[src_eval_ctx.import_op.output.field_idx as usize]\n 
.value_type\n .typ\n {\n schema::ValueType::Table(cs) => cs,\n _ => {\n bail!(\"Expect source output to be a table\")\n }\n };\n\n let scope_value =\n ScopeValueBuilder::augmented_from(&value::ScopeValue(source_value), table_schema)?;\n root_scope_entry.define_field_w_builder(\n &src_eval_ctx.import_op.output,\n value::Value::KTable(BTreeMap::from([(src_eval_ctx.key.clone(), scope_value)])),\n )?;\n\n evaluate_op_scope(\n &src_eval_ctx.plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n memory,\n )\n .await?;\n let collected_values = root_scope_entry\n .collected_values\n .into_iter()\n .map(|v| v.into_inner().unwrap())\n .collect::>();\n Ok(EvaluateSourceEntryOutput {\n data_scope: root_scope_value,\n collected_values,\n })\n}\n\npub async fn evaluate_transient_flow(\n flow: &AnalyzedTransientFlow,\n input_values: &Vec,\n) -> Result {\n let root_schema = &flow.data_schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &flow.execution_plan.op_scope,\n );\n\n if input_values.len() != flow.execution_plan.input_fields.len() {\n bail!(\n \"Input values length mismatch: expect {}, got {}\",\n flow.execution_plan.input_fields.len(),\n input_values.len()\n );\n }\n for (field, value) in flow.execution_plan.input_fields.iter().zip(input_values) {\n root_scope_entry.define_field(field, value)?;\n }\n let eval_memory = EvaluationMemory::new(\n chrono::Utc::now(),\n None,\n EvaluationMemoryOptions {\n enable_cache: false,\n evaluation_only: true,\n },\n );\n evaluate_op_scope(\n &flow.execution_plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n &eval_memory,\n )\n .await?;\n let output_value = assemble_value(\n &flow.execution_plan.output_value,\n RefList::Nil.prepend(&root_scope_entry),\n );\n Ok(output_value)\n}\n"], ["/cocoindex/src/builder/flow_builder.rs", "use crate::{prelude::*, py::Pythonized};\n\nuse 
pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::{collections::btree_map, ops::Deref};\nuse tokio::task::LocalSet;\n\nuse super::analyzer::{\n AnalyzerContext, CollectorBuilder, DataScopeBuilder, OpScope, build_flow_instance_context,\n};\nuse crate::{\n base::{\n schema::{CollectorSchema, FieldSchema},\n spec::{FieldName, NamedSpec},\n },\n lib_context::LibContext,\n ops::interface::FlowInstanceContext,\n py::IntoPyResult,\n};\nuse crate::{lib_context::FlowContext, py};\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct OpScopeRef(Arc);\n\nimpl From> for OpScopeRef {\n fn from(scope: Arc) -> Self {\n Self(scope)\n }\n}\n\nimpl Deref for OpScopeRef {\n type Target = Arc;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl std::fmt::Display for OpScopeRef {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}\n\n#[pymethods]\nimpl OpScopeRef {\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn add_collector(&mut self, name: String) -> PyResult {\n let collector = DataCollector {\n name,\n scope: self.0.clone(),\n collector: Mutex::new(None),\n };\n Ok(collector)\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataType {\n schema: schema::EnrichedValueType,\n}\n\nimpl From for DataType {\n fn from(schema: schema::EnrichedValueType) -> Self {\n Self { schema }\n }\n}\n\n#[pymethods]\nimpl DataType {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.schema)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn schema(&self) -> Pythonized {\n Pythonized(self.schema.clone())\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataSlice {\n scope: Arc,\n value: Arc,\n data_type: DataType,\n}\n\n#[pymethods]\nimpl DataSlice {\n pub fn data_type(&self) -> DataType {\n self.data_type.clone()\n }\n\n pub fn __str__(&self) -> String 
{\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn field(&self, field_name: &str) -> PyResult> {\n let field_schema = match &self.data_type.schema.typ {\n schema::ValueType::Struct(struct_type) => {\n match struct_type.fields.iter().find(|f| f.name == field_name) {\n Some(field) => field,\n None => return Ok(None),\n }\n }\n _ => return Err(PyException::new_err(\"expect struct type\")),\n };\n let value_mapping = match self.value.as_ref() {\n spec::ValueMapping::Field(spec::FieldMapping {\n scope,\n field_path: spec::FieldPath(field_path),\n }) => spec::ValueMapping::Field(spec::FieldMapping {\n scope: scope.clone(),\n field_path: spec::FieldPath(\n field_path\n .iter()\n .cloned()\n .chain([field_name.to_string()])\n .collect(),\n ),\n }),\n\n spec::ValueMapping::Struct(v) => v\n .fields\n .iter()\n .find(|f| f.name == field_name)\n .map(|f| f.spec.clone())\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?,\n\n spec::ValueMapping::Constant { .. 
} => {\n return Err(PyException::new_err(\n \"field access not supported for literal\",\n ));\n }\n };\n Ok(Some(DataSlice {\n scope: self.scope.clone(),\n value: Arc::new(value_mapping),\n data_type: field_schema.value_type.clone().into(),\n }))\n }\n}\n\nimpl DataSlice {\n fn extract_value_mapping(&self) -> spec::ValueMapping {\n match self.value.as_ref() {\n spec::ValueMapping::Field(v) => spec::ValueMapping::Field(spec::FieldMapping {\n field_path: v.field_path.clone(),\n scope: v.scope.clone().or_else(|| Some(self.scope.name.clone())),\n }),\n v => v.clone(),\n }\n }\n}\n\nimpl std::fmt::Display for DataSlice {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(\n f,\n \"DataSlice({}; {} {}) \",\n self.data_type.schema, self.scope, self.value\n )?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct DataCollector {\n name: String,\n scope: Arc,\n collector: Mutex>,\n}\n\n#[pymethods]\nimpl DataCollector {\n fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for DataCollector {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let collector = self.collector.lock().unwrap();\n write!(f, \"DataCollector \\\"{}\\\" ({}\", self.name, self.scope)?;\n if let Some(collector) = collector.as_ref() {\n write!(f, \": {}\", collector.schema)?;\n if collector.is_used {\n write!(f, \" (used)\")?;\n }\n }\n write!(f, \")\")?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct FlowBuilder {\n lib_context: Arc,\n flow_inst_context: Arc,\n\n root_op_scope: Arc,\n flow_instance_name: String,\n reactive_ops: Vec>,\n\n direct_input_fields: Vec,\n direct_output_value: Option,\n\n import_ops: Vec>,\n export_ops: Vec>,\n\n declarations: Vec,\n\n next_generated_op_id: usize,\n}\n\n#[pymethods]\nimpl FlowBuilder {\n #[new]\n pub fn new(name: &str) -> PyResult {\n let lib_context = get_lib_context().into_py_result()?;\n let root_op_scope = OpScope::new(\n 
spec::ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(DataScopeBuilder::new())),\n );\n let flow_inst_context = build_flow_instance_context(name, None);\n let result = Self {\n lib_context,\n flow_inst_context,\n root_op_scope,\n flow_instance_name: name.to_string(),\n\n reactive_ops: vec![],\n\n import_ops: vec![],\n export_ops: vec![],\n\n direct_input_fields: vec![],\n direct_output_value: None,\n\n declarations: vec![],\n\n next_generated_op_id: 0,\n };\n Ok(result)\n }\n\n pub fn root_scope(&self) -> OpScopeRef {\n OpScopeRef(self.root_op_scope.clone())\n }\n\n #[pyo3(signature = (kind, op_spec, target_scope, name, refresh_options=None, execution_options=None))]\n #[allow(clippy::too_many_arguments)]\n pub fn add_source(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n target_scope: Option,\n name: String,\n refresh_options: Option>,\n execution_options: Option>,\n ) -> PyResult {\n if let Some(target_scope) = target_scope {\n if *target_scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"source can only be added to the root scope\",\n ));\n }\n }\n let import_op = spec::NamedSpec {\n name,\n spec: spec::ImportOpSpec {\n source: spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n },\n refresh_options: refresh_options.map(|o| o.into_inner()).unwrap_or_default(),\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n },\n };\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(\n analyzer_ctx.analyze_import_op(&self.root_op_scope, import_op.clone()),\n )\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.import_ops.push(import_op);\n Ok(result)\n }\n\n pub fn constant(\n &self,\n value_type: py::Pythonized,\n value: Bound<'_, 
PyAny>,\n ) -> PyResult {\n let schema = value_type.into_inner();\n let value = py::value_from_py_object(&schema.typ, &value)?;\n let slice = DataSlice {\n scope: self.root_op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Constant(spec::ConstantMapping {\n schema: schema.clone(),\n value: serde_json::to_value(value).into_py_result()?,\n })),\n data_type: schema.into(),\n };\n Ok(slice)\n }\n\n pub fn add_direct_input(\n &mut self,\n name: String,\n value_type: py::Pythonized,\n ) -> PyResult {\n let value_type = value_type.into_inner();\n {\n let mut root_data_scope = self.root_op_scope.data.lock().unwrap();\n root_data_scope\n .add_field(name.clone(), &value_type)\n .into_py_result()?;\n }\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.direct_input_fields\n .push(FieldSchema { name, value_type });\n Ok(result)\n }\n\n pub fn set_direct_output(&mut self, data_slice: DataSlice) -> PyResult<()> {\n if data_slice.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"direct output must be value in the root scope\",\n ));\n }\n self.direct_output_value = Some(data_slice.extract_value_mapping());\n Ok(())\n }\n\n #[pyo3(signature = (data_slice, execution_options=None))]\n pub fn for_each(\n &mut self,\n data_slice: DataSlice,\n execution_options: Option>,\n ) -> PyResult {\n let parent_scope = &data_slice.scope;\n let field_path = match data_slice.value.as_ref() {\n spec::ValueMapping::Field(v) => &v.field_path,\n _ => return Err(PyException::new_err(\"expect field path\")),\n };\n let num_parent_layers = parent_scope.ancestors().count();\n let scope_name = format!(\n \"{}_{}\",\n field_path.last().map_or(\"\", |s| s.as_str()),\n num_parent_layers\n );\n let (_, child_op_scope) = parent_scope\n .new_foreach_op_scope(scope_name.clone(), field_path)\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name: format!(\".for_each.{}\", self.next_generated_op_id),\n spec: 
spec::ReactiveOpSpec::ForEach(spec::ForEachOpSpec {\n field_path: field_path.clone(),\n op_scope: spec::ReactiveOpScope {\n name: scope_name,\n ops: vec![],\n },\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n }),\n };\n self.next_generated_op_id += 1;\n self.get_mut_reactive_ops(parent_scope)\n .into_py_result()?\n .push(reactive_op);\n\n Ok(OpScopeRef(child_op_scope))\n }\n\n #[pyo3(signature = (kind, op_spec, args, target_scope, name))]\n pub fn transform(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n args: Vec<(DataSlice, Option)>,\n target_scope: Option,\n name: String,\n ) -> PyResult {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n let op_scope = Self::minimum_common_scope(\n args.iter().map(|(ds, _)| &ds.scope),\n target_scope.as_ref().map(|s| &s.0),\n )\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Transform(spec::TransformOpSpec {\n inputs: args\n .iter()\n .map(|(ds, arg_name)| spec::OpArgBinding {\n arg_name: spec::OpArgName(arg_name.clone()),\n value: ds.extract_value_mapping(),\n })\n .collect(),\n op: spec,\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(op_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(op_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let result = Self::last_field_to_data_slice(op_scope).into_py_result()?;\n Ok(result)\n }\n\n #[pyo3(signature = (collector, fields, auto_uuid_field=None))]\n pub fn collect(\n &mut self,\n py: Python<'_>,\n collector: &DataCollector,\n fields: Vec<(FieldName, DataSlice)>,\n auto_uuid_field: Option,\n ) -> PyResult<()> {\n let common_scope = Self::minimum_common_scope(fields.iter().map(|(_, ds)| 
&ds.scope), None)\n .into_py_result()?;\n let name = format!(\".collect.{}\", self.next_generated_op_id);\n self.next_generated_op_id += 1;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Collect(spec::CollectOpSpec {\n input: spec::StructMapping {\n fields: fields\n .iter()\n .map(|(name, ds)| NamedSpec {\n name: name.clone(),\n spec: ds.extract_value_mapping(),\n })\n .collect(),\n },\n scope_name: collector.scope.name.clone(),\n collector_name: collector.name.clone(),\n auto_uuid_field: auto_uuid_field.clone(),\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(common_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(common_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let collector_schema = CollectorSchema::from_fields(\n fields\n .into_iter()\n .map(|(name, ds)| FieldSchema {\n name,\n value_type: ds.data_type.schema,\n })\n .collect(),\n auto_uuid_field,\n );\n {\n let mut collector = collector.collector.lock().unwrap();\n if let Some(collector) = collector.as_mut() {\n collector.merge_schema(&collector_schema).into_py_result()?;\n } else {\n *collector = Some(CollectorBuilder::new(Arc::new(collector_schema)));\n }\n }\n\n Ok(())\n }\n\n #[pyo3(signature = (name, kind, op_spec, index_options, input, setup_by_user=false))]\n pub fn export(\n &mut self,\n name: String,\n kind: String,\n op_spec: py::Pythonized>,\n index_options: py::Pythonized,\n input: &DataCollector,\n setup_by_user: bool,\n ) -> PyResult<()> {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n\n if input.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"Export can only work on collectors belonging to the root scope.\",\n ));\n }\n self.export_ops.push(spec::NamedSpec {\n 
name,\n spec: spec::ExportOpSpec {\n collector_name: input.name.clone(),\n target: spec,\n index_options: index_options.into_inner(),\n setup_by_user,\n },\n });\n Ok(())\n }\n\n pub fn declare(&mut self, op_spec: py::Pythonized) -> PyResult<()> {\n self.declarations.push(op_spec.into_inner());\n Ok(())\n }\n\n pub fn scope_field(&self, scope: OpScopeRef, field_name: &str) -> PyResult> {\n let field_type = {\n let scope_builder = scope.0.data.lock().unwrap();\n let (_, field_schema) = scope_builder\n .data\n .find_field(field_name)\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?;\n schema::EnrichedValueType::from_alternative(&field_schema.value_type)\n .into_py_result()?\n };\n Ok(Some(DataSlice {\n scope: scope.0,\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![field_name.to_string()]),\n })),\n data_type: DataType { schema: field_type },\n }))\n }\n\n pub fn build_flow(&self, py: Python<'_>, py_event_loop: Py) -> PyResult {\n let spec = spec::FlowInstanceSpec {\n name: self.flow_instance_name.clone(),\n import_ops: self.import_ops.clone(),\n reactive_ops: self.reactive_ops.clone(),\n export_ops: self.export_ops.clone(),\n declarations: self.declarations.clone(),\n };\n let flow_instance_ctx = build_flow_instance_context(\n &self.flow_instance_name,\n Some(crate::py::PythonExecutionContext::new(py, py_event_loop)),\n );\n let flow_ctx = py\n .allow_threads(|| {\n get_runtime().block_on(async move {\n let analyzed_flow =\n super::AnalyzedFlow::from_flow_instance(spec, flow_instance_ctx).await?;\n let persistence_ctx = self.lib_context.require_persistence_ctx()?;\n let execution_ctx = {\n let flow_setup_ctx = persistence_ctx.setup_ctx.read().await;\n FlowContext::new(\n Arc::new(analyzed_flow),\n flow_setup_ctx\n .all_setup_states\n .flows\n .get(&self.flow_instance_name),\n )\n .await?\n };\n anyhow::Ok(execution_ctx)\n })\n })\n .into_py_result()?;\n let mut 
flow_ctxs = self.lib_context.flows.lock().unwrap();\n let flow_ctx = match flow_ctxs.entry(self.flow_instance_name.clone()) {\n btree_map::Entry::Occupied(_) => {\n return Err(PyException::new_err(format!(\n \"flow instance name already exists: {}\",\n self.flow_instance_name\n )));\n }\n btree_map::Entry::Vacant(entry) => {\n let flow_ctx = Arc::new(flow_ctx);\n entry.insert(flow_ctx.clone());\n flow_ctx\n }\n };\n Ok(py::Flow(flow_ctx))\n }\n\n pub fn build_transient_flow_async<'py>(\n &self,\n py: Python<'py>,\n py_event_loop: Py,\n ) -> PyResult> {\n if self.direct_input_fields.is_empty() {\n return Err(PyException::new_err(\"expect at least one direct input\"));\n }\n let direct_output_value = if let Some(direct_output_value) = &self.direct_output_value {\n direct_output_value\n } else {\n return Err(PyException::new_err(\"expect direct output\"));\n };\n let spec = spec::TransientFlowSpec {\n name: self.flow_instance_name.clone(),\n input_fields: self.direct_input_fields.clone(),\n reactive_ops: self.reactive_ops.clone(),\n output_value: direct_output_value.clone(),\n };\n let py_ctx = crate::py::PythonExecutionContext::new(py, py_event_loop);\n\n let analyzed_flow = get_runtime().spawn_blocking(|| {\n let local_set = LocalSet::new();\n local_set.block_on(\n get_runtime(),\n super::AnalyzedTransientFlow::from_transient_flow(spec, Some(py_ctx)),\n )\n });\n future_into_py(py, async move {\n Ok(py::TransientFlow(Arc::new(\n analyzed_flow.await.into_py_result()?.into_py_result()?,\n )))\n })\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for FlowBuilder {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Flow instance name: {}\\n\\n\", self.flow_instance_name)?;\n for op in self.import_ops.iter() {\n write!(\n f,\n \"Source op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n 
}\n for field in self.direct_input_fields.iter() {\n writeln!(f, \"Direct input {}: {}\", field.name, field.value_type)?;\n }\n if !self.direct_input_fields.is_empty() {\n writeln!(f)?;\n }\n for op in self.reactive_ops.iter() {\n write!(\n f,\n \"Reactive op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for op in self.export_ops.iter() {\n write!(\n f,\n \"Export op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n if let Some(output) = &self.direct_output_value {\n write!(f, \"Direct output: {output}\\n\\n\")?;\n }\n Ok(())\n }\n}\n\nimpl FlowBuilder {\n fn last_field_to_data_slice(op_scope: &Arc) -> Result {\n let data_scope = op_scope.data.lock().unwrap();\n let last_field = data_scope.last_field().unwrap();\n let result = DataSlice {\n scope: op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![last_field.name.clone()]),\n })),\n data_type: schema::EnrichedValueType::from_alternative(&last_field.value_type)?.into(),\n };\n Ok(result)\n }\n\n fn minimum_common_scope<'a>(\n scopes: impl Iterator>,\n target_scope: Option<&'a Arc>,\n ) -> Result<&'a Arc> {\n let mut scope_iter = scopes;\n let mut common_scope = scope_iter\n .next()\n .ok_or_else(|| PyException::new_err(\"expect at least one input\"))?;\n for scope in scope_iter {\n if scope.is_op_scope_descendant(common_scope) {\n common_scope = scope;\n } else if !common_scope.is_op_scope_descendant(scope) {\n api_bail!(\n \"expect all arguments share the common scope, got {} and {} exclusive to each other\",\n common_scope,\n scope\n );\n }\n }\n if let Some(target_scope) = target_scope {\n if !target_scope.is_op_scope_descendant(common_scope) {\n api_bail!(\n \"the field can only be attached to a scope or sub-scope of the input value. 
Target scope: {}, input scope: {}\",\n target_scope,\n common_scope\n );\n }\n common_scope = target_scope;\n }\n Ok(common_scope)\n }\n\n fn get_mut_reactive_ops<'a>(\n &'a mut self,\n op_scope: &OpScope,\n ) -> Result<&'a mut Vec>> {\n Self::get_mut_reactive_ops_internal(op_scope, &mut self.reactive_ops)\n }\n\n fn get_mut_reactive_ops_internal<'a>(\n op_scope: &OpScope,\n root_reactive_ops: &'a mut Vec>,\n ) -> Result<&'a mut Vec>> {\n let result = match &op_scope.parent {\n None => root_reactive_ops,\n Some((parent_op_scope, field_path)) => {\n let parent_reactive_ops =\n Self::get_mut_reactive_ops_internal(parent_op_scope, root_reactive_ops)?;\n // Reuse the last foreach if matched, otherwise create a new one.\n match parent_reactive_ops.last() {\n Some(spec::NamedSpec {\n spec: spec::ReactiveOpSpec::ForEach(foreach_spec),\n ..\n }) if &foreach_spec.field_path == field_path\n && foreach_spec.op_scope.name == op_scope.name => {}\n\n _ => {\n api_bail!(\"already out of op scope `{}`\", op_scope.name);\n }\n }\n match &mut parent_reactive_ops.last_mut().unwrap().spec {\n spec::ReactiveOpSpec::ForEach(foreach_spec) => &mut foreach_spec.op_scope.ops,\n _ => unreachable!(),\n }\n }\n };\n Ok(result)\n }\n}\n"], ["/cocoindex/src/py/mod.rs", "use crate::execution::evaluator::evaluate_transient_flow;\nuse crate::prelude::*;\n\nuse crate::base::schema::{FieldSchema, ValueType};\nuse crate::base::spec::{NamedSpec, OutputMode, ReactiveOpSpec, SpecFormatter};\nuse crate::lib_context::{clear_lib_context, get_auth_registry, init_lib_context};\nuse crate::ops::py_factory::{PyExportTargetFactory, PyOpArgSchema};\nuse crate::ops::{interface::ExecutorFactory, py_factory::PyFunctionFactory, register_factory};\nuse crate::server::{self, ServerSettings};\nuse crate::settings::Settings;\nuse crate::setup::{self};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::fmt::Write;\nuse 
std::sync::Arc;\n\nmod convert;\npub(crate) use convert::*;\n\npub struct PythonExecutionContext {\n pub event_loop: Py,\n}\n\nimpl PythonExecutionContext {\n pub fn new(_py: Python<'_>, event_loop: Py) -> Self {\n Self { event_loop }\n }\n}\n\npub trait ToResultWithPyTrace {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result;\n}\n\nimpl ToResultWithPyTrace for Result {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => {\n let mut err_str = format!(\"Error calling Python function: {err}\");\n if let Some(tb) = err.traceback(py) {\n write!(&mut err_str, \"\\n{}\", tb.format()?)?;\n }\n Err(anyhow::anyhow!(err_str))\n }\n }\n }\n}\npub trait IntoPyResult {\n fn into_py_result(self) -> PyResult;\n}\n\nimpl IntoPyResult for Result {\n fn into_py_result(self) -> PyResult {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(PyException::new_err(format!(\"{err:?}\"))),\n }\n }\n}\n\n#[pyfunction]\nfn init(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n init_lib_context(settings.into_inner())?;\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn start_server(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n let server = get_runtime().block_on(server::init_server(\n get_lib_context()?,\n settings.into_inner(),\n ))?;\n get_runtime().spawn(server);\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn stop(py: Python<'_>) -> PyResult<()> {\n py.allow_threads(clear_lib_context);\n Ok(())\n}\n\n#[pyfunction]\nfn register_function_factory(name: String, py_function_factory: Py) -> PyResult<()> {\n let factory = PyFunctionFactory {\n py_function_factory,\n };\n register_factory(name, ExecutorFactory::SimpleFunction(Arc::new(factory))).into_py_result()\n}\n\n#[pyfunction]\nfn register_target_connector(name: String, py_target_connector: Py) -> PyResult<()> {\n let 
factory = PyExportTargetFactory {\n py_target_connector,\n };\n register_factory(name, ExecutorFactory::ExportTarget(Arc::new(factory))).into_py_result()\n}\n\n#[pyclass]\npub struct IndexUpdateInfo(pub execution::stats::IndexUpdateInfo);\n\n#[pymethods]\nimpl IndexUpdateInfo {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.0)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\n#[pyclass]\npub struct Flow(pub Arc);\n\n/// A single line in the rendered spec, with hierarchical children\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpecLine {\n /// The formatted content of the line (e.g., \"Import: name=documents, source=LocalFile\")\n pub content: String,\n /// Child lines in the hierarchy\n pub children: Vec,\n}\n\n/// A rendered specification, grouped by sections\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpec {\n /// List of (section_name, lines) pairs\n pub sections: Vec<(String, Vec)>,\n}\n\n#[pyclass]\npub struct FlowLiveUpdaterUpdates(execution::FlowLiveUpdaterUpdates);\n\n#[pymethods]\nimpl FlowLiveUpdaterUpdates {\n #[getter]\n pub fn active_sources(&self) -> Vec {\n self.0.active_sources.clone()\n }\n\n #[getter]\n pub fn updated_sources(&self) -> Vec {\n self.0.updated_sources.clone()\n }\n}\n\n#[pyclass]\npub struct FlowLiveUpdater(pub Arc);\n\n#[pymethods]\nimpl FlowLiveUpdater {\n #[staticmethod]\n pub fn create<'py>(\n py: Python<'py>,\n flow: &Flow,\n options: Pythonized,\n ) -> PyResult> {\n let flow = flow.0.clone();\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow,\n lib_context.require_builtin_db_pool().into_py_result()?,\n options.into_inner(),\n )\n .await\n .into_py_result()?;\n Ok(Self(Arc::new(live_updater)))\n })\n }\n\n pub fn wait_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = 
self.0.clone();\n future_into_py(\n py,\n async move { live_updater.wait().await.into_py_result() },\n )\n }\n\n pub fn next_status_updates_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(py, async move {\n let updates = live_updater.next_status_updates().await.into_py_result()?;\n Ok(FlowLiveUpdaterUpdates(updates))\n })\n }\n\n pub fn abort(&self) {\n self.0.abort();\n }\n\n pub fn index_update_info(&self) -> IndexUpdateInfo {\n IndexUpdateInfo(self.0.index_update_info())\n }\n}\n\n#[pymethods]\nimpl Flow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.flow.flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn name(&self) -> &str {\n &self.0.flow.flow_instance.name\n }\n\n pub fn evaluate_and_dump(\n &self,\n py: Python<'_>,\n options: Pythonized,\n ) -> PyResult<()> {\n py.allow_threads(|| {\n get_runtime()\n .block_on(async {\n let exec_plan = self.0.flow.get_execution_plan().await?;\n let lib_context = get_lib_context()?;\n let execution_ctx = self.0.use_execution_ctx().await?;\n execution::dumper::evaluate_and_dump(\n &exec_plan,\n &execution_ctx.setup_execution_context,\n &self.0.flow.data_schema,\n options.into_inner(),\n lib_context.require_builtin_db_pool()?,\n )\n .await\n })\n .into_py_result()?;\n Ok(())\n })\n }\n\n #[pyo3(signature = (output_mode=None))]\n pub fn get_spec(&self, output_mode: Option>) -> PyResult {\n let mode = output_mode.map_or(OutputMode::Concise, |m| m.into_inner());\n let spec = &self.0.flow.flow_instance;\n let mut sections: IndexMap> = IndexMap::new();\n\n // Sources\n sections.insert(\n \"Source\".to_string(),\n spec.import_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Import: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Processing\n fn walk(op: &NamedSpec, mode: OutputMode) -> RenderedSpecLine {\n let content = format!(\"{}: 
{}\", op.name, op.spec.format(mode));\n\n let children = match &op.spec {\n ReactiveOpSpec::ForEach(fe) => fe\n .op_scope\n .ops\n .iter()\n .map(|nested| walk(nested, mode))\n .collect(),\n _ => vec![],\n };\n\n RenderedSpecLine { content, children }\n }\n\n sections.insert(\n \"Processing\".to_string(),\n spec.reactive_ops.iter().map(|op| walk(op, mode)).collect(),\n );\n\n // Targets\n sections.insert(\n \"Targets\".to_string(),\n spec.export_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Export: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Declarations\n sections.insert(\n \"Declarations\".to_string(),\n spec.declarations\n .iter()\n .map(|decl| RenderedSpecLine {\n content: format!(\"Declaration: {}\", decl.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n Ok(RenderedSpec {\n sections: sections.into_iter().collect(),\n })\n }\n\n pub fn get_schema(&self) -> Vec<(String, String, String)> {\n let schema = &self.0.flow.data_schema;\n let mut result = Vec::new();\n\n fn process_fields(\n fields: &[FieldSchema],\n prefix: &str,\n result: &mut Vec<(String, String, String)>,\n ) {\n for field in fields {\n let field_name = format!(\"{}{}\", prefix, field.name);\n\n let mut field_type = match &field.value_type.typ {\n ValueType::Basic(basic) => format!(\"{basic}\"),\n ValueType::Table(t) => format!(\"{}\", t.kind),\n ValueType::Struct(_) => \"Struct\".to_string(),\n };\n\n if field.value_type.nullable {\n field_type.push('?');\n }\n\n let attr_str = if field.value_type.attrs.is_empty() {\n String::new()\n } else {\n field\n .value_type\n .attrs\n .keys()\n .map(|k| k.to_string())\n .collect::>()\n .join(\", \")\n };\n\n result.push((field_name.clone(), field_type, attr_str));\n\n match &field.value_type.typ {\n ValueType::Struct(s) => {\n process_fields(&s.fields, &format!(\"{field_name}.\"), result);\n }\n ValueType::Table(t) => {\n process_fields(&t.row.fields, 
&format!(\"{field_name}[].\"), result);\n }\n ValueType::Basic(_) => {}\n }\n }\n }\n\n process_fields(&schema.schema.fields, \"\", &mut result);\n result\n }\n\n pub fn make_setup_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n\n pub fn make_drop_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n}\n\n#[pyclass]\npub struct TransientFlow(pub Arc);\n\n#[pymethods]\nimpl TransientFlow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.transient_flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn evaluate_async<'py>(\n &self,\n py: Python<'py>,\n args: Vec>,\n ) -> PyResult> {\n let flow = self.0.clone();\n let input_values: Vec = std::iter::zip(\n self.0.transient_flow_instance.input_fields.iter(),\n args.into_iter(),\n )\n .map(|(input_schema, arg)| value_from_py_object(&input_schema.value_type.typ, &arg))\n .collect::>()?;\n\n future_into_py(py, async move {\n let result = evaluate_transient_flow(&flow, &input_values)\n .await\n .into_py_result()?;\n Python::with_gil(|py| value_to_py_object(py, &result)?.into_py_any(py))\n })\n }\n}\n\n#[pyclass]\npub struct SetupChangeBundle(Arc);\n\n#[pymethods]\nimpl SetupChangeBundle {\n pub fn describe_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n future_into_py(py, async move {\n bundle.describe(&lib_context).await.into_py_result()\n })\n }\n\n pub fn apply_async<'py>(\n &self,\n py: Python<'py>,\n report_to_stdout: bool,\n ) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = 
self.0.clone();\n\n future_into_py(py, async move {\n let mut stdout = None;\n let mut sink = None;\n bundle\n .apply(\n &lib_context,\n if report_to_stdout {\n stdout.insert(std::io::stdout())\n } else {\n sink.insert(std::io::sink())\n },\n )\n .await\n .into_py_result()\n })\n }\n}\n\n#[pyfunction]\nfn flow_names_with_setup_async(py: Python<'_>) -> PyResult> {\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let setup_ctx = lib_context\n .require_persistence_ctx()\n .into_py_result()?\n .setup_ctx\n .read()\n .await;\n let flow_names: Vec = setup_ctx.all_setup_states.flows.keys().cloned().collect();\n PyResult::Ok(flow_names)\n })\n}\n\n#[pyfunction]\nfn make_setup_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn make_drop_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn remove_flow_context(flow_name: String) {\n let lib_context_locked = crate::lib_context::LIB_CONTEXT.read().unwrap();\n if let Some(lib_context) = lib_context_locked.as_ref() {\n lib_context.remove_flow_context(&flow_name)\n }\n}\n\n#[pyfunction]\nfn add_auth_entry(key: String, value: Pythonized) -> PyResult<()> {\n get_auth_registry()\n .add(key, value.into_inner())\n .into_py_result()?;\n Ok(())\n}\n\n#[pyfunction]\nfn seder_roundtrip<'py>(\n py: Python<'py>,\n value: Bound<'py, PyAny>,\n typ: Pythonized,\n) -> PyResult> {\n let typ = typ.into_inner();\n let value = value_from_py_object(&typ, &value)?;\n let value = value::test_util::seder_roundtrip(&value, &typ).into_py_result()?;\n value_to_py_object(py, &value)\n}\n\n/// A Python module implemented in Rust.\n#[pymodule]\n#[pyo3(name = \"_engine\")]\nfn cocoindex_engine(m: 
&Bound<'_, PyModule>) -> PyResult<()> {\n m.add_function(wrap_pyfunction!(init, m)?)?;\n m.add_function(wrap_pyfunction!(start_server, m)?)?;\n m.add_function(wrap_pyfunction!(stop, m)?)?;\n m.add_function(wrap_pyfunction!(register_function_factory, m)?)?;\n m.add_function(wrap_pyfunction!(register_target_connector, m)?)?;\n m.add_function(wrap_pyfunction!(flow_names_with_setup_async, m)?)?;\n m.add_function(wrap_pyfunction!(make_setup_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(make_drop_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(remove_flow_context, m)?)?;\n m.add_function(wrap_pyfunction!(add_auth_entry, m)?)?;\n\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n\n let testutil_module = PyModule::new(m.py(), \"testutil\")?;\n testutil_module.add_function(wrap_pyfunction!(seder_roundtrip, &testutil_module)?)?;\n m.add_submodule(&testutil_module)?;\n\n Ok(())\n}\n"], ["/cocoindex/src/execution/source_indexer.rs", "use crate::{\n prelude::*,\n service::error::{SharedError, SharedResult, SharedResultExt},\n};\n\nuse futures::future::Ready;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, hash_map};\nuse tokio::{sync::Semaphore, task::JoinSet};\n\nuse super::{\n db_tracking,\n evaluator::SourceRowEvaluationContext,\n row_indexer::{self, SkippedOr, SourceVersion},\n stats,\n};\n\nuse crate::ops::interface;\nstruct SourceRowIndexingState {\n source_version: SourceVersion,\n processing_sem: Arc,\n touched_generation: usize,\n}\n\nimpl Default for SourceRowIndexingState {\n fn default() -> Self {\n Self {\n source_version: SourceVersion::default(),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: 0,\n }\n }\n}\n\nstruct SourceIndexingState {\n rows: HashMap,\n scan_generation: usize,\n}\n\npub struct SourceIndexingContext {\n flow: Arc,\n source_idx: 
usize,\n pending_update: Mutex>>>>,\n update_sem: Semaphore,\n state: Mutex,\n setup_execution_ctx: Arc,\n}\n\npub const NO_ACK: Option Ready>> = None;\n\nimpl SourceIndexingContext {\n pub async fn load(\n flow: Arc,\n source_idx: usize,\n setup_execution_ctx: Arc,\n pool: &PgPool,\n ) -> Result {\n let plan = flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[source_idx];\n let mut list_state = db_tracking::ListTrackedSourceKeyMetadataState::new();\n let mut rows = HashMap::new();\n let scan_generation = 0;\n {\n let mut key_metadata_stream = list_state.list(\n setup_execution_ctx.import_ops[source_idx].source_id,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n while let Some(key_metadata) = key_metadata_stream.next().await {\n let key_metadata = key_metadata?;\n let source_key = value::Value::::from_json(\n key_metadata.source_key,\n &import_op.primary_key_type,\n )?\n .into_key()?;\n rows.insert(\n source_key,\n SourceRowIndexingState {\n source_version: SourceVersion::from_stored(\n key_metadata.processed_source_ordinal,\n &key_metadata.process_logic_fingerprint,\n plan.logic_fingerprint,\n ),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: scan_generation,\n },\n );\n }\n }\n Ok(Self {\n flow,\n source_idx,\n state: Mutex::new(SourceIndexingState {\n rows,\n scan_generation,\n }),\n pending_update: Mutex::new(None),\n update_sem: Semaphore::new(1),\n setup_execution_ctx,\n })\n }\n\n pub async fn process_source_key<\n AckFut: Future> + Send + 'static,\n AckFn: FnOnce() -> AckFut,\n >(\n self: Arc,\n key: value::KeyValue,\n source_data: Option,\n update_stats: Arc,\n _concur_permit: concur_control::CombinedConcurrencyControllerPermit,\n ack_fn: Option,\n pool: PgPool,\n ) {\n let process = async {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let schema = &self.flow.data_schema;\n let source_data = match source_data {\n Some(source_data) => 
source_data,\n None => import_op\n .executor\n .get_value(\n &key,\n &interface::SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: true,\n },\n )\n .await?\n .try_into()?,\n };\n\n let source_version = SourceVersion::from_current_data(&source_data);\n let processing_sem = {\n let mut state = self.state.lock().unwrap();\n let touched_generation = state.scan_generation;\n match state.rows.entry(key.clone()) {\n hash_map::Entry::Occupied(mut entry) => {\n if entry\n .get()\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n return anyhow::Ok(());\n }\n let sem = entry.get().processing_sem.clone();\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n entry.remove();\n } else {\n entry.get_mut().source_version = source_version.clone();\n }\n sem\n }\n hash_map::Entry::Vacant(entry) => {\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n update_stats.num_no_change.inc(1);\n return anyhow::Ok(());\n }\n let new_entry = SourceRowIndexingState {\n source_version: source_version.clone(),\n touched_generation,\n ..Default::default()\n };\n let sem = new_entry.processing_sem.clone();\n entry.insert(new_entry);\n sem\n }\n }\n };\n\n let _processing_permit = processing_sem.acquire().await?;\n let result = row_indexer::update_source_row(\n &SourceRowEvaluationContext {\n plan: &plan,\n import_op,\n schema,\n key: &key,\n import_op_idx: self.source_idx,\n },\n &self.setup_execution_ctx,\n source_data.value,\n &source_version,\n &pool,\n &update_stats,\n )\n .await?;\n let target_source_version = if let SkippedOr::Skipped(existing_source_version) = result\n {\n Some(existing_source_version)\n } else if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n Some(source_version)\n } else {\n None\n };\n if let Some(target_source_version) = target_source_version {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let entry 
= state.rows.entry(key.clone());\n match entry {\n hash_map::Entry::Occupied(mut entry) => {\n if !entry\n .get()\n .source_version\n .should_skip(&target_source_version, None)\n {\n if target_source_version.kind\n == row_indexer::SourceVersionKind::NonExistence\n {\n entry.remove();\n } else {\n let mut_entry = entry.get_mut();\n mut_entry.source_version = target_source_version;\n mut_entry.touched_generation = scan_generation;\n }\n }\n }\n hash_map::Entry::Vacant(entry) => {\n if target_source_version.kind\n != row_indexer::SourceVersionKind::NonExistence\n {\n entry.insert(SourceRowIndexingState {\n source_version: target_source_version,\n touched_generation: scan_generation,\n ..Default::default()\n });\n }\n }\n }\n }\n anyhow::Ok(())\n };\n let process_and_ack = async {\n process.await?;\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n anyhow::Ok(())\n };\n if let Err(e) = process_and_ack.await {\n update_stats.num_errors.inc(1);\n error!(\n \"{:?}\",\n e.context(format!(\n \"Error in processing row from source `{source}` with key: {key}\",\n source = self.flow.flow_instance.import_ops[self.source_idx].name\n ))\n );\n }\n }\n\n pub async fn update(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let pending_update_fut = {\n let mut pending_update = self.pending_update.lock().unwrap();\n if let Some(pending_update_fut) = &*pending_update {\n pending_update_fut.clone()\n } else {\n let slf = self.clone();\n let pool = pool.clone();\n let update_stats = update_stats.clone();\n let task = tokio::spawn(async move {\n {\n let _permit = slf.update_sem.acquire().await?;\n {\n let mut pending_update = slf.pending_update.lock().unwrap();\n *pending_update = None;\n }\n slf.update_once(&pool, &update_stats).await?;\n }\n anyhow::Ok(())\n });\n let pending_update_fut = async move {\n task.await\n .map_err(SharedError::from)?\n .map_err(SharedError::new)\n }\n .boxed()\n .shared();\n *pending_update = 
Some(pending_update_fut.clone());\n pending_update_fut\n }\n };\n pending_update_fut.await.std_result()?;\n Ok(())\n }\n\n async fn update_once(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let mut rows_stream = import_op\n .executor\n .list(&interface::SourceExecutorListOptions {\n include_ordinal: true,\n });\n let mut join_set = JoinSet::new();\n let scan_generation = {\n let mut state = self.state.lock().unwrap();\n state.scan_generation += 1;\n state.scan_generation\n };\n while let Some(row) = rows_stream.next().await {\n for row in row? {\n let source_version = SourceVersion::from_current_with_ordinal(\n row.ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is not available\"))?,\n );\n {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let row_state = state.rows.entry(row.key.clone()).or_default();\n row_state.touched_generation = scan_generation;\n if row_state\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n continue;\n }\n }\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n join_set.spawn(self.clone().process_source_key(\n row.key,\n None,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n let deleted_key_versions = {\n let mut deleted_key_versions = Vec::new();\n let state = self.state.lock().unwrap();\n for (key, row_state) in state.rows.iter() {\n if row_state.touched_generation < scan_generation {\n deleted_key_versions.push((key.clone(), row_state.source_version.ordinal));\n }\n }\n deleted_key_versions\n };\n for (key, source_ordinal) in deleted_key_versions {\n // If the source ordinal is 
unavailable, call without source ordinal so that another polling will be triggered to avoid out-of-order.\n let source_data = source_ordinal\n .is_available()\n .then(|| interface::SourceData {\n value: interface::SourceValue::NonExistence,\n ordinal: source_ordinal,\n });\n let concur_permit = import_op.concurrency_controller.acquire(Some(|| 0)).await?;\n join_set.spawn(self.clone().process_source_key(\n key,\n source_data,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/dumper.rs", "use crate::prelude::*;\n\nuse futures::{StreamExt, future::try_join_all};\nuse itertools::Itertools;\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::path::{Path, PathBuf};\nuse yaml_rust2::YamlEmitter;\n\nuse super::evaluator::SourceRowEvaluationContext;\nuse super::memoization::EvaluationMemoryOptions;\nuse super::row_indexer;\nuse crate::base::{schema, value};\nuse crate::builder::plan::{AnalyzedImportOp, ExecutionPlan};\nuse crate::ops::interface::SourceExecutorListOptions;\nuse crate::utils::yaml_ser::YamlSerializer;\n\n#[derive(Debug, Clone, Deserialize)]\npub struct EvaluateAndDumpOptions {\n pub output_dir: String,\n pub use_cache: bool,\n}\n\nconst FILENAME_PREFIX_MAX_LENGTH: usize = 128;\n\nstruct TargetExportData<'a> {\n schema: &'a Vec,\n // The purpose is to make rows sorted by primary key.\n data: BTreeMap,\n}\n\nimpl Serialize for TargetExportData<'_> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(Some(self.data.len()))?;\n for (_, values) in self.data.iter() {\n seq.serialize_element(&value::TypedFieldsValue {\n schema: self.schema,\n values_iter: values.fields.iter(),\n })?;\n }\n seq.end()\n }\n}\n\n#[derive(Serialize)]\nstruct SourceOutputData<'a> {\n key: 
value::TypedValue<'a>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n exports: Option>>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n error: Option,\n}\n\nstruct Dumper<'a> {\n plan: &'a ExecutionPlan,\n setup_execution_ctx: &'a exec_ctx::FlowSetupExecutionContext,\n schema: &'a schema::FlowSchema,\n pool: &'a PgPool,\n options: EvaluateAndDumpOptions,\n}\n\nimpl<'a> Dumper<'a> {\n async fn evaluate_source_entry<'b>(\n &'a self,\n import_op_idx: usize,\n import_op: &'a AnalyzedImportOp,\n key: &value::KeyValue,\n collected_values_buffer: &'b mut Vec>,\n ) -> Result>>>\n where\n 'a: 'b,\n {\n let data_builder = row_indexer::evaluate_source_entry_with_memory(\n &SourceRowEvaluationContext {\n plan: self.plan,\n import_op,\n schema: self.schema,\n key,\n import_op_idx,\n },\n self.setup_execution_ctx,\n EvaluationMemoryOptions {\n enable_cache: self.options.use_cache,\n evaluation_only: true,\n },\n self.pool,\n )\n .await?;\n\n let data_builder = if let Some(data_builder) = data_builder {\n data_builder\n } else {\n return Ok(None);\n };\n\n *collected_values_buffer = data_builder.collected_values;\n let exports = self\n .plan\n .export_ops\n .iter()\n .map(|export_op| -> Result<_> {\n let collector_idx = export_op.input.collector_idx as usize;\n let entry = (\n export_op.name.as_str(),\n TargetExportData {\n schema: &self.schema.root_op_scope.collectors[collector_idx]\n .spec\n .fields,\n data: collected_values_buffer[collector_idx]\n .iter()\n .map(|v| -> Result<_> {\n let key = row_indexer::extract_primary_key(\n &export_op.primary_key_def,\n v,\n )?;\n Ok((key, v))\n })\n .collect::>()?,\n },\n );\n Ok(entry)\n })\n .collect::>()?;\n Ok(Some(exports))\n }\n\n async fn evaluate_and_dump_source_entry(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n key: value::KeyValue,\n file_path: PathBuf,\n ) -> Result<()> {\n let _permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n 
let mut collected_values_buffer = Vec::new();\n let (exports, error) = match self\n .evaluate_source_entry(import_op_idx, import_op, &key, &mut collected_values_buffer)\n .await\n {\n Ok(exports) => (exports, None),\n Err(e) => (None, Some(format!(\"{e:?}\"))),\n };\n let key_value = value::Value::from(key);\n let file_data = SourceOutputData {\n key: value::TypedValue {\n t: &import_op.primary_key_type,\n v: &key_value,\n },\n exports,\n error,\n };\n\n let yaml_output = {\n let mut yaml_output = String::new();\n let yaml_data = YamlSerializer::serialize(&file_data)?;\n let mut yaml_emitter = YamlEmitter::new(&mut yaml_output);\n yaml_emitter.multiline_strings(true);\n yaml_emitter.compact(true);\n yaml_emitter.dump(&yaml_data)?;\n yaml_output\n };\n tokio::fs::write(file_path, yaml_output).await?;\n\n Ok(())\n }\n\n async fn evaluate_and_dump_for_source(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n ) -> Result<()> {\n let mut keys_by_filename_prefix: IndexMap> = IndexMap::new();\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n while let Some(rows) = rows_stream.next().await {\n for row in rows?.into_iter() {\n let mut s = row\n .key\n .to_strs()\n .into_iter()\n .map(|s| urlencoding::encode(&s).into_owned())\n .join(\":\");\n s.truncate(\n (0..(FILENAME_PREFIX_MAX_LENGTH - import_op.name.as_str().len()))\n .rev()\n .find(|i| s.is_char_boundary(*i))\n .unwrap_or(0),\n );\n keys_by_filename_prefix.entry(s).or_default().push(row.key);\n }\n }\n let output_dir = Path::new(&self.options.output_dir);\n let evaluate_futs =\n keys_by_filename_prefix\n .into_iter()\n .flat_map(|(filename_prefix, keys)| {\n let num_keys = keys.len();\n keys.into_iter().enumerate().map(move |(i, key)| {\n let extra_id = if num_keys > 1 {\n Cow::Owned(format!(\".{i}\"))\n } else {\n Cow::Borrowed(\"\")\n };\n let file_name =\n format!(\"{}@{}{}.yaml\", import_op.name, filename_prefix, extra_id);\n let 
file_path = output_dir.join(Path::new(&file_name));\n self.evaluate_and_dump_source_entry(\n import_op_idx,\n import_op,\n key,\n file_path,\n )\n })\n });\n try_join_all(evaluate_futs).await?;\n Ok(())\n }\n\n async fn evaluate_and_dump(&self) -> Result<()> {\n try_join_all(\n self.plan\n .import_ops\n .iter()\n .enumerate()\n .map(|(idx, import_op)| self.evaluate_and_dump_for_source(idx, import_op)),\n )\n .await?;\n Ok(())\n }\n}\n\npub async fn evaluate_and_dump(\n plan: &ExecutionPlan,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n schema: &schema::FlowSchema,\n options: EvaluateAndDumpOptions,\n pool: &PgPool,\n) -> Result<()> {\n let output_dir = Path::new(&options.output_dir);\n if output_dir.exists() {\n if !output_dir.is_dir() {\n return Err(anyhow::anyhow!(\"The path exists and is not a directory\"));\n }\n } else {\n tokio::fs::create_dir(output_dir).await?;\n }\n\n let dumper = Dumper {\n plan,\n setup_execution_ctx,\n schema,\n pool,\n options,\n };\n dumper.evaluate_and_dump().await\n}\n"], ["/cocoindex/src/setup/driver.rs", "use crate::{\n lib_context::{FlowContext, FlowExecutionContext, LibSetupContext},\n ops::{\n get_optional_executor_factory,\n interface::{ExportTargetFactory, FlowInstanceContext},\n },\n prelude::*,\n};\n\nuse sqlx::PgPool;\nuse std::{\n fmt::{Debug, Display},\n str::FromStr,\n};\n\nuse super::{AllSetupStates, GlobalSetupStatus};\nuse super::{\n CombinedState, DesiredMode, ExistingMode, FlowSetupState, FlowSetupStatus, ObjectSetupStatus,\n ObjectStatus, ResourceIdentifier, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType,\n StateChange, TargetSetupState, db_metadata,\n};\nuse crate::execution::db_tracking_setup;\nuse crate::ops::interface::ExecutorFactory;\nuse std::fmt::Write;\n\nenum MetadataRecordType {\n FlowVersion,\n FlowMetadata,\n TrackingTable,\n Target(String),\n}\n\nimpl Display for MetadataRecordType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n 
MetadataRecordType::FlowVersion => f.write_str(db_metadata::FLOW_VERSION_RESOURCE_TYPE),\n MetadataRecordType::FlowMetadata => write!(f, \"FlowMetadata\"),\n MetadataRecordType::TrackingTable => write!(f, \"TrackingTable\"),\n MetadataRecordType::Target(target_id) => write!(f, \"Target:{target_id}\"),\n }\n }\n}\n\nimpl std::str::FromStr for MetadataRecordType {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n if s == db_metadata::FLOW_VERSION_RESOURCE_TYPE {\n Ok(Self::FlowVersion)\n } else if s == \"FlowMetadata\" {\n Ok(Self::FlowMetadata)\n } else if s == \"TrackingTable\" {\n Ok(Self::TrackingTable)\n } else if let Some(target_id) = s.strip_prefix(\"Target:\") {\n Ok(Self::Target(target_id.to_string()))\n } else {\n anyhow::bail!(\"Invalid MetadataRecordType string: {}\", s)\n }\n }\n}\n\nfn from_metadata_record(\n state: Option,\n staging_changes: sqlx::types::Json>>,\n legacy_state_key: Option,\n) -> Result> {\n let current: Option = state.map(serde_json::from_value).transpose()?;\n let staging: Vec> = (staging_changes.0.into_iter())\n .map(|sc| -> Result<_> {\n Ok(match sc {\n StateChange::Upsert(v) => StateChange::Upsert(serde_json::from_value(v)?),\n StateChange::Delete => StateChange::Delete,\n })\n })\n .collect::>()?;\n Ok(CombinedState {\n current,\n staging,\n legacy_state_key,\n })\n}\n\nfn get_export_target_factory(\n target_type: &str,\n) -> Option> {\n match get_optional_executor_factory(target_type) {\n Some(ExecutorFactory::ExportTarget(factory)) => Some(factory),\n _ => None,\n }\n}\n\npub async fn get_existing_setup_state(pool: &PgPool) -> Result> {\n let setup_metadata_records = db_metadata::read_setup_metadata(pool).await?;\n\n let setup_metadata_records = if let Some(records) = setup_metadata_records {\n records\n } else {\n return Ok(AllSetupStates::default());\n };\n\n // Group setup metadata records by flow name\n let setup_metadata_records = setup_metadata_records.into_iter().fold(\n BTreeMap::>::new(),\n |mut acc, 
record| {\n acc.entry(record.flow_name.clone())\n .or_default()\n .push(record);\n acc\n },\n );\n\n let flows = setup_metadata_records\n .into_iter()\n .map(|(flow_name, metadata_records)| -> anyhow::Result<_> {\n let mut flow_ss = FlowSetupState::default();\n for metadata_record in metadata_records {\n let state = metadata_record.state;\n let staging_changes = metadata_record.staging_changes;\n match MetadataRecordType::from_str(&metadata_record.resource_type)? {\n MetadataRecordType::FlowVersion => {\n flow_ss.seen_flow_metadata_version =\n db_metadata::parse_flow_version(&state);\n }\n MetadataRecordType::FlowMetadata => {\n flow_ss.metadata = from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::TrackingTable => {\n flow_ss.tracking_table =\n from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::Target(target_type) => {\n let normalized_key = {\n if let Some(factory) = get_export_target_factory(&target_type) {\n factory.normalize_setup_key(&metadata_record.key)?\n } else {\n metadata_record.key.clone()\n }\n };\n let combined_state = from_metadata_record(\n state,\n staging_changes,\n (normalized_key != metadata_record.key).then_some(metadata_record.key),\n )?;\n flow_ss.targets.insert(\n super::ResourceIdentifier {\n key: normalized_key,\n target_kind: target_type,\n },\n combined_state,\n );\n }\n }\n }\n Ok((flow_name, flow_ss))\n })\n .collect::>()?;\n\n Ok(AllSetupStates {\n has_metadata_table: true,\n flows,\n })\n}\n\nfn diff_state(\n existing_state: Option<&E>,\n desired_state: Option<&D>,\n diff: impl Fn(Option<&E>, &D) -> Option>,\n) -> Option>\nwhere\n E: PartialEq,\n{\n match (existing_state, desired_state) {\n (None, None) => None,\n (Some(_), None) => Some(StateChange::Delete),\n (existing_state, Some(desired_state)) => {\n if existing_state.map(|e| e == desired_state).unwrap_or(false) {\n None\n } else {\n diff(existing_state, desired_state)\n }\n }\n }\n}\n\nfn to_object_status(existing: Option, 
desired: Option) -> Option {\n Some(match (&existing, &desired) {\n (Some(_), None) => ObjectStatus::Deleted,\n (None, Some(_)) => ObjectStatus::New,\n (Some(_), Some(_)) => ObjectStatus::Existing,\n (None, None) => return None,\n })\n}\n\n#[derive(Debug, Default)]\nstruct GroupedResourceStates {\n desired: Option,\n existing: CombinedState,\n}\n\nfn group_resource_states<'a>(\n desired: impl Iterator,\n existing: impl Iterator)>,\n) -> Result> {\n let mut grouped: IndexMap<&'a ResourceIdentifier, GroupedResourceStates> = desired\n .into_iter()\n .map(|(key, state)| {\n (\n key,\n GroupedResourceStates {\n desired: Some(state.clone()),\n existing: CombinedState::default(),\n },\n )\n })\n .collect();\n for (key, state) in existing {\n let entry = grouped.entry(key);\n if state.current.is_some() {\n if let indexmap::map::Entry::Occupied(entry) = &entry {\n if entry.get().existing.current.is_some() {\n bail!(\"Duplicate existing state for key: {}\", entry.key());\n }\n }\n }\n let entry = entry.or_default();\n if let Some(current) = &state.current {\n entry.existing.current = Some(current.clone());\n }\n if let Some(legacy_state_key) = &state.legacy_state_key {\n if entry\n .existing\n .legacy_state_key\n .as_ref()\n .is_some_and(|v| v != legacy_state_key)\n {\n warn!(\n \"inconsistent legacy key: {:?}, {:?}\",\n key, entry.existing.legacy_state_key\n );\n }\n entry.existing.legacy_state_key = Some(legacy_state_key.clone());\n }\n for s in state.staging.iter() {\n match s {\n StateChange::Upsert(v) => {\n entry.existing.staging.push(StateChange::Upsert(v.clone()))\n }\n StateChange::Delete => entry.existing.staging.push(StateChange::Delete),\n }\n }\n }\n Ok(grouped)\n}\n\npub async fn check_flow_setup_status(\n desired_state: Option<&FlowSetupState>,\n existing_state: Option<&FlowSetupState>,\n flow_instance_ctx: &Arc,\n) -> Result {\n let metadata_change = diff_state(\n existing_state.map(|e| &e.metadata),\n desired_state.map(|d| &d.metadata),\n |_, desired_state| 
Some(StateChange::Upsert(desired_state.clone())),\n );\n\n let new_source_ids = desired_state\n .iter()\n .flat_map(|d| d.metadata.sources.values().map(|v| v.source_id))\n .collect::>();\n let tracking_table_change = db_tracking_setup::TrackingTableSetupStatus::new(\n desired_state.map(|d| &d.tracking_table),\n &existing_state\n .map(|e| Cow::Borrowed(&e.tracking_table))\n .unwrap_or_default(),\n (existing_state.iter())\n .flat_map(|state| state.metadata.possible_versions())\n .flat_map(|metadata| {\n metadata\n .sources\n .values()\n .map(|v| v.source_id)\n .filter(|id| !new_source_ids.contains(id))\n })\n .collect::>()\n .into_iter()\n .collect(),\n );\n\n let mut target_resources = Vec::new();\n let mut unknown_resources = Vec::new();\n\n let grouped_target_resources = group_resource_states(\n desired_state.iter().flat_map(|d| d.targets.iter()),\n existing_state.iter().flat_map(|e| e.targets.iter()),\n )?;\n for (resource_id, v) in grouped_target_resources.into_iter() {\n let factory = match get_export_target_factory(&resource_id.target_kind) {\n Some(factory) => factory,\n None => {\n unknown_resources.push(resource_id.clone());\n continue;\n }\n };\n let state = v.desired.clone();\n let target_state = v\n .desired\n .and_then(|state| (!state.common.setup_by_user).then_some(state.state));\n let existing_without_setup_by_user = CombinedState {\n current: v\n .existing\n .current\n .and_then(|s| s.state_unless_setup_by_user()),\n staging: v\n .existing\n .staging\n .into_iter()\n .filter_map(|s| match s {\n StateChange::Upsert(s) => {\n s.state_unless_setup_by_user().map(StateChange::Upsert)\n }\n StateChange::Delete => Some(StateChange::Delete),\n })\n .collect(),\n legacy_state_key: v.existing.legacy_state_key.clone(),\n };\n let never_setup_by_sys = target_state.is_none()\n && existing_without_setup_by_user.current.is_none()\n && existing_without_setup_by_user.staging.is_empty();\n let setup_status = if never_setup_by_sys {\n None\n } else {\n Some(\n 
factory\n .check_setup_status(\n &resource_id.key,\n target_state,\n existing_without_setup_by_user,\n flow_instance_ctx.clone(),\n )\n .await?,\n )\n };\n target_resources.push(ResourceSetupInfo {\n key: resource_id.clone(),\n state,\n description: factory.describe_resource(&resource_id.key)?,\n setup_status,\n legacy_key: v\n .existing\n .legacy_state_key\n .map(|legacy_state_key| ResourceIdentifier {\n target_kind: resource_id.target_kind.clone(),\n key: legacy_state_key,\n }),\n });\n }\n Ok(FlowSetupStatus {\n status: to_object_status(existing_state, desired_state),\n seen_flow_metadata_version: existing_state.and_then(|s| s.seen_flow_metadata_version),\n metadata_change,\n tracking_table: tracking_table_change.map(|c| c.into_setup_info()),\n target_resources,\n unknown_resources,\n })\n}\n\nstruct ResourceSetupChangeItem<'a, K: 'a, C: ResourceSetupStatus> {\n key: &'a K,\n setup_status: &'a C,\n}\n\nasync fn maybe_update_resource_setup<\n 'a,\n K: 'a,\n S: 'a,\n C: ResourceSetupStatus,\n ChangeApplierResultFut: Future>,\n>(\n resource_kind: &str,\n write: &mut (dyn std::io::Write + Send),\n resources: impl Iterator>,\n apply_change: impl FnOnce(Vec>) -> ChangeApplierResultFut,\n) -> Result<()> {\n let mut changes = Vec::new();\n for resource in resources {\n if let Some(setup_status) = &resource.setup_status {\n if setup_status.change_type() != SetupChangeType::NoChange {\n changes.push(ResourceSetupChangeItem {\n key: &resource.key,\n setup_status,\n });\n writeln!(write, \"{}:\", resource.description)?;\n for change in setup_status.describe_changes() {\n match change {\n setup::ChangeDescription::Action(action) => {\n writeln!(write, \" - {action}\")?;\n }\n setup::ChangeDescription::Note(_) => {}\n }\n }\n }\n }\n }\n if !changes.is_empty() {\n write!(write, \"Pushing change for {resource_kind}...\")?;\n apply_change(changes).await?;\n writeln!(write, \"DONE\")?;\n }\n Ok(())\n}\n\nasync fn apply_changes_for_flow(\n write: &mut (dyn std::io::Write + 
Send),\n flow_ctx: &FlowContext,\n flow_status: &FlowSetupStatus,\n existing_setup_state: &mut Option>,\n pool: &PgPool,\n) -> Result<()> {\n let Some(status) = flow_status.status else {\n return Ok(());\n };\n let verb = match status {\n ObjectStatus::New => \"Creating\",\n ObjectStatus::Deleted => \"Deleting\",\n ObjectStatus::Existing => \"Updating resources for \",\n _ => bail!(\"invalid flow status\"),\n };\n write!(write, \"\\n{verb} flow {}:\\n\", flow_ctx.flow_name())?;\n\n let mut update_info =\n HashMap::::new();\n\n if let Some(metadata_change) = &flow_status.metadata_change {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::FlowMetadata.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(metadata_change.desired_state(), None)?,\n );\n }\n if let Some(tracking_table) = &flow_status.tracking_table {\n if tracking_table\n .setup_status\n .as_ref()\n .map(|c| c.change_type() != SetupChangeType::NoChange)\n .unwrap_or_default()\n {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::TrackingTable.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(tracking_table.state.as_ref(), None)?,\n );\n }\n }\n\n for target_resource in &flow_status.target_resources {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(target_resource.key.target_kind.clone()).to_string(),\n target_resource.key.key.clone(),\n ),\n db_metadata::StateUpdateInfo::new(\n target_resource.state.as_ref(),\n target_resource.legacy_key.as_ref().map(|k| {\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(k.target_kind.clone()).to_string(),\n k.key.clone(),\n )\n }),\n )?,\n );\n }\n\n let new_version_id = db_metadata::stage_changes_for_flow(\n flow_ctx.flow_name(),\n flow_status.seen_flow_metadata_version,\n &update_info,\n pool,\n )\n .await?;\n\n if let Some(tracking_table) = &flow_status.tracking_table {\n 
maybe_update_resource_setup(\n \"tracking table\",\n write,\n std::iter::once(tracking_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n }\n\n let mut setup_status_by_target_kind = IndexMap::<&str, Vec<_>>::new();\n for target_resource in &flow_status.target_resources {\n setup_status_by_target_kind\n .entry(target_resource.key.target_kind.as_str())\n .or_default()\n .push(target_resource);\n }\n for (target_kind, resources) in setup_status_by_target_kind.into_iter() {\n maybe_update_resource_setup(\n target_kind,\n write,\n resources.into_iter(),\n |setup_status| async move {\n let factory = get_export_target_factory(target_kind).ok_or_else(|| {\n anyhow::anyhow!(\"No factory found for target kind: {}\", target_kind)\n })?;\n factory\n .apply_setup_changes(\n setup_status\n .into_iter()\n .map(|s| interface::ResourceSetupChangeItem {\n key: &s.key.key,\n setup_status: s.setup_status.as_ref(),\n })\n .collect(),\n flow_ctx.flow.flow_instance_ctx.clone(),\n )\n .await?;\n Ok(())\n },\n )\n .await?;\n }\n\n let is_deletion = status == ObjectStatus::Deleted;\n db_metadata::commit_changes_for_flow(\n flow_ctx.flow_name(),\n new_version_id,\n &update_info,\n is_deletion,\n pool,\n )\n .await?;\n if is_deletion {\n *existing_setup_state = None;\n } else {\n let (existing_metadata, existing_tracking_table, existing_targets) =\n match std::mem::take(existing_setup_state) {\n Some(s) => (Some(s.metadata), Some(s.tracking_table), s.targets),\n None => Default::default(),\n };\n let metadata = CombinedState::from_change(\n existing_metadata,\n flow_status\n .metadata_change\n .as_ref()\n .map(|v| v.desired_state()),\n );\n let tracking_table = CombinedState::from_change(\n existing_tracking_table,\n flow_status.tracking_table.as_ref().map(|c| {\n c.setup_status\n .as_ref()\n .and_then(|c| c.desired_state.as_ref())\n }),\n );\n let mut targets = existing_targets;\n for target_resource in &flow_status.target_resources {\n match 
&target_resource.state {\n Some(state) => {\n targets.insert(\n target_resource.key.clone(),\n CombinedState::from_desired(state.clone()),\n );\n }\n None => {\n targets.shift_remove(&target_resource.key);\n }\n }\n }\n *existing_setup_state = Some(setup::FlowSetupState {\n metadata,\n tracking_table,\n seen_flow_metadata_version: Some(new_version_id),\n targets,\n });\n }\n\n writeln!(write, \"Done for flow {}\", flow_ctx.flow_name())?;\n Ok(())\n}\n\nasync fn apply_global_changes(\n write: &mut (dyn std::io::Write + Send),\n setup_status: &GlobalSetupStatus,\n all_setup_states: &mut AllSetupStates,\n) -> Result<()> {\n maybe_update_resource_setup(\n \"metadata table\",\n write,\n std::iter::once(&setup_status.metadata_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n\n if setup_status\n .metadata_table\n .setup_status\n .as_ref()\n .is_some_and(|c| c.change_type() == SetupChangeType::Create)\n {\n all_setup_states.has_metadata_table = true;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum FlowSetupChangeAction {\n Setup,\n Drop,\n}\npub struct SetupChangeBundle {\n pub action: FlowSetupChangeAction,\n pub flow_names: Vec,\n}\n\nimpl SetupChangeBundle {\n async fn get_flow_setup_status<'a>(\n setup_ctx: &LibSetupContext,\n flow_ctx: &'a FlowContext,\n flow_exec_ctx: &'a FlowExecutionContext,\n action: &FlowSetupChangeAction,\n buffer: &'a mut Option,\n ) -> Result<&'a FlowSetupStatus> {\n let result = match action {\n FlowSetupChangeAction::Setup => &flow_exec_ctx.setup_status,\n FlowSetupChangeAction::Drop => {\n let existing_state = setup_ctx.all_setup_states.flows.get(flow_ctx.flow_name());\n buffer.insert(\n check_flow_setup_status(None, existing_state, &flow_ctx.flow.flow_instance_ctx)\n .await?,\n )\n }\n };\n Ok(result)\n }\n\n pub async fn describe(&self, lib_context: &LibContext) -> Result<(String, bool)> {\n let mut text = String::new();\n let mut is_up_to_date = true;\n\n let setup_ctx = 
lib_context\n .require_persistence_ctx()?\n .setup_ctx\n .read()\n .await;\n let setup_ctx = &*setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup {\n is_up_to_date = is_up_to_date && setup_ctx.global_setup_status.is_up_to_date();\n write!(&mut text, \"{}\", setup_ctx.global_setup_status)?;\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().read().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n\n is_up_to_date = is_up_to_date && setup_status.is_up_to_date();\n write!(\n &mut text,\n \"{}\",\n setup::FormattedFlowSetupStatus(flow_name, setup_status)\n )?;\n }\n Ok((text, is_up_to_date))\n }\n\n pub async fn apply(\n &self,\n lib_context: &LibContext,\n write: &mut (dyn std::io::Write + Send),\n ) -> Result<()> {\n let persistence_ctx = lib_context.require_persistence_ctx()?;\n let mut setup_ctx = persistence_ctx.setup_ctx.write().await;\n let setup_ctx = &mut *setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup\n && !setup_ctx.global_setup_status.is_up_to_date()\n {\n apply_global_changes(\n write,\n &setup_ctx.global_setup_status,\n &mut setup_ctx.all_setup_states,\n )\n .await?;\n setup_ctx.global_setup_status =\n GlobalSetupStatus::from_setup_states(&setup_ctx.all_setup_states);\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let mut flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().write().await;\n\n let mut setup_status_buffer = None;\n let setup_status = 
Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n if setup_status.is_up_to_date() {\n continue;\n }\n\n let mut flow_states = setup_ctx.all_setup_states.flows.remove(flow_name);\n apply_changes_for_flow(\n write,\n &flow_ctx,\n setup_status,\n &mut flow_states,\n &persistence_ctx.builtin_db_pool,\n )\n .await?;\n\n flow_exec_ctx\n .update_setup_state(&flow_ctx.flow, flow_states.as_ref())\n .await?;\n if let Some(flow_states) = flow_states {\n setup_ctx\n .all_setup_states\n .flows\n .insert(flow_name.to_string(), flow_states);\n }\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/neo4j.rs", "use crate::prelude::*;\n\nuse super::shared::property_graph::*;\n\nuse crate::setup::components::{self, State, apply_component_changes};\nuse crate::setup::{ResourceSetupStatus, SetupChangeType};\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nuse indoc::formatdoc;\nuse neo4rs::{BoltType, ConfigBuilder, Graph};\nuse std::fmt::Write;\nuse tokio::sync::OnceCell;\n\nconst DEFAULT_DB: &str = \"neo4j\";\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n uri: String,\n user: String,\n password: String,\n db: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\ntype Neo4jGraphElement = GraphElementType;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct GraphKey {\n uri: String,\n db: String,\n}\n\nimpl GraphKey {\n fn from_spec(spec: &ConnectionSpec) -> Self {\n Self {\n uri: spec.uri.clone(),\n db: spec.db.clone().unwrap_or_else(|| DEFAULT_DB.to_string()),\n }\n }\n}\n\nimpl retryable::IsRetryable for neo4rs::Error {\n fn is_retryable(&self) -> bool {\n match self {\n neo4rs::Error::ConnectionError => true,\n 
neo4rs::Error::Neo4j(e) => e.kind() == neo4rs::Neo4jErrorKind::Transient,\n _ => false,\n }\n }\n}\n\n#[derive(Default)]\npub struct GraphPool {\n graphs: Mutex>>>>,\n}\n\nimpl GraphPool {\n async fn get_graph(&self, spec: &ConnectionSpec) -> Result> {\n let graph_key = GraphKey::from_spec(spec);\n let cell = {\n let mut graphs = self.graphs.lock().unwrap();\n graphs.entry(graph_key).or_default().clone()\n };\n let graph = cell\n .get_or_try_init(|| async {\n let mut config_builder = ConfigBuilder::default()\n .uri(spec.uri.clone())\n .user(spec.user.clone())\n .password(spec.password.clone());\n if let Some(db) = &spec.db {\n config_builder = config_builder.db(db.clone());\n }\n anyhow::Ok(Arc::new(Graph::connect(config_builder.build()?).await?))\n })\n .await?;\n Ok(graph.clone())\n }\n\n async fn get_graph_for_key(\n &self,\n key: &Neo4jGraphElement,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let spec = auth_registry.get::(&key.connection)?;\n self.get_graph(&spec).await\n }\n}\n\npub struct ExportContext {\n connection_ref: AuthEntryReference,\n graph: Arc,\n\n create_order: u8,\n\n delete_cypher: String,\n insert_cypher: String,\n delete_before_upsert: bool,\n\n analyzed_data_coll: AnalyzedDataCollection,\n\n key_field_params: Vec,\n src_key_field_params: Vec,\n tgt_key_field_params: Vec,\n}\n\nfn json_value_to_bolt_value(value: &serde_json::Value) -> Result {\n let bolt_value = match value {\n serde_json::Value::Null => BoltType::Null(neo4rs::BoltNull),\n serde_json::Value::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n serde_json::Value::Number(v) => {\n if let Some(i) = v.as_i64() {\n BoltType::Integer(neo4rs::BoltInteger::new(i))\n } else if let Some(f) = v.as_f64() {\n BoltType::Float(neo4rs::BoltFloat::new(f))\n } else {\n anyhow::bail!(\"Unsupported JSON number: {}\", v)\n }\n }\n serde_json::Value::String(v) => BoltType::String(neo4rs::BoltString::new(v)),\n serde_json::Value::Array(v) => BoltType::List(neo4rs::BoltList {\n 
value: v\n .iter()\n .map(json_value_to_bolt_value)\n .collect::>()?,\n }),\n serde_json::Value::Object(v) => BoltType::Map(neo4rs::BoltMap {\n value: v\n .into_iter()\n .map(|(k, v)| Ok((neo4rs::BoltString::new(k), json_value_to_bolt_value(v)?)))\n .collect::>()?,\n }),\n };\n Ok(bolt_value)\n}\n\nfn key_to_bolt(key: &KeyValue, schema: &schema::ValueType) -> Result {\n value_to_bolt(&key.into(), schema)\n}\n\nfn field_values_to_bolt<'a>(\n field_values: impl IntoIterator,\n schema: impl IntoIterator,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(schema, field_values)\n .map(|(schema, value)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(value, &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn mapped_field_values_to_bolt(\n fields_schema: &[schema::FieldSchema],\n fields_input_idx: &[usize],\n field_values: &FieldValues,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(fields_schema.iter(), fields_input_idx.iter())\n .map(|(schema, field_idx)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(&field_values.fields[*field_idx], &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn basic_value_to_bolt(value: &BasicValue, schema: &BasicValueType) -> Result {\n let bolt_value = match value {\n BasicValue::Bytes(v) => {\n BoltType::Bytes(neo4rs::BoltBytes::new(bytes::Bytes::from_owner(v.clone())))\n }\n BasicValue::Str(v) => BoltType::String(neo4rs::BoltString::new(v)),\n BasicValue::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n BasicValue::Int64(v) => BoltType::Integer(neo4rs::BoltInteger::new(*v)),\n BasicValue::Float64(v) => BoltType::Float(neo4rs::BoltFloat::new(*v)),\n BasicValue::Float32(v) => BoltType::Float(neo4rs::BoltFloat::new(*v as f64)),\n BasicValue::Range(v) => BoltType::List(neo4rs::BoltList {\n value: [\n 
BoltType::Integer(neo4rs::BoltInteger::new(v.start as i64)),\n BoltType::Integer(neo4rs::BoltInteger::new(v.end as i64)),\n ]\n .into(),\n }),\n BasicValue::Uuid(v) => BoltType::String(neo4rs::BoltString::new(&v.to_string())),\n BasicValue::Date(v) => BoltType::Date(neo4rs::BoltDate::from(*v)),\n BasicValue::Time(v) => BoltType::LocalTime(neo4rs::BoltLocalTime::from(*v)),\n BasicValue::LocalDateTime(v) => {\n BoltType::LocalDateTime(neo4rs::BoltLocalDateTime::from(*v))\n }\n BasicValue::OffsetDateTime(v) => BoltType::DateTime(neo4rs::BoltDateTime::from(*v)),\n BasicValue::TimeDelta(v) => BoltType::Duration(neo4rs::BoltDuration::new(\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger {\n value: v.num_seconds(),\n },\n v.subsec_nanos().into(),\n )),\n BasicValue::Vector(v) => match schema {\n BasicValueType::Vector(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| basic_value_to_bolt(v, &t.element_type))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-vector type got vector value: {}\", schema),\n },\n BasicValue::Json(v) => json_value_to_bolt_value(v)?,\n BasicValue::UnionVariant { tag_id, value } => match schema {\n BasicValueType::Union(s) => {\n let typ = s\n .types\n .get(*tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"Invalid `tag_id`: {}\", tag_id))?;\n\n basic_value_to_bolt(value, typ)?\n }\n _ => anyhow::bail!(\"Non-union type got union value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nfn value_to_bolt(value: &Value, schema: &schema::ValueType) -> Result {\n let bolt_value = match value {\n Value::Null => BoltType::Null(neo4rs::BoltNull),\n Value::Basic(v) => match schema {\n ValueType::Basic(t) => basic_value_to_bolt(v, t)?,\n _ => anyhow::bail!(\"Non-basic type got basic value: {}\", schema),\n },\n Value::Struct(v) => match schema {\n ValueType::Struct(t) => field_values_to_bolt(v.fields.iter(), t.fields.iter())?,\n _ => anyhow::bail!(\"Non-struct type got struct value: {}\", schema),\n 
},\n Value::UTable(v) | Value::LTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| field_values_to_bolt(v.0.fields.iter(), t.row.fields.iter()))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n Value::KTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|(k, v)| {\n field_values_to_bolt(\n std::iter::once(&Into::::into(k.clone()))\n .chain(v.0.fields.iter()),\n t.row.fields.iter(),\n )\n })\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nconst CORE_KEY_PARAM_PREFIX: &str = \"key\";\nconst CORE_PROPS_PARAM: &str = \"props\";\nconst SRC_KEY_PARAM_PREFIX: &str = \"source_key\";\nconst SRC_PROPS_PARAM: &str = \"source_props\";\nconst TGT_KEY_PARAM_PREFIX: &str = \"target_key\";\nconst TGT_PROPS_PARAM: &str = \"target_props\";\nconst CORE_ELEMENT_MATCHER_VAR: &str = \"e\";\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\nimpl ExportContext {\n fn build_key_field_params_n_literal<'a>(\n param_prefix: &str,\n key_fields: impl Iterator,\n ) -> (Vec, String) {\n let (params, items): (Vec, Vec) = key_fields\n .into_iter()\n .enumerate()\n .map(|(i, name)| {\n let param = format!(\"{param_prefix}_{i}\");\n let item = format!(\"{name}: ${param}\");\n (param, item)\n })\n .unzip();\n (params, format!(\"{{{}}}\", items.into_iter().join(\", \")))\n }\n\n fn new(\n graph: Arc,\n spec: Spec,\n analyzed_data_coll: AnalyzedDataCollection,\n ) -> Result {\n let (key_field_params, key_fields_literal) = Self::build_key_field_params_n_literal(\n CORE_KEY_PARAM_PREFIX,\n analyzed_data_coll.schema.key_fields.iter().map(|f| &f.name),\n );\n let result = match spec.mapping {\n GraphElementMapping::Node(node_spec) => {\n let delete_cypher = formatdoc! 
{\"\n OPTIONAL MATCH (old_node:{label} {key_fields_literal})\n WITH old_node\n SET old_node.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n WITH old_node\n WHERE NOT (old_node)--()\n DELETE old_node\n FINISH\n \",\n label = node_spec.label,\n };\n\n let insert_cypher = formatdoc! {\"\n MERGE (new_node:{label} {key_fields_literal})\n SET new_node.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE{optional_set_props}\n FINISH\n \",\n label = node_spec.label,\n optional_set_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\", new_node += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 0,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: false,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params: vec![],\n tgt_key_field_params: vec![],\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_src)-[old_rel:{rel_type} {key_fields_literal}]->(old_tgt)\n\n DELETE old_rel\n\n WITH collect(old_src) + collect(old_tgt) AS nodes_to_check\n UNWIND nodes_to_check AS node\n WITH DISTINCT node\n WHERE NOT COALESCE(node.{SELF_CONTAINED_TAG_FIELD_NAME}, FALSE)\n AND COUNT{{ (node)--() }} = 0\n DELETE node\n\n FINISH\n \",\n rel_type = rel_spec.rel_type,\n };\n\n let analyzed_rel = analyzed_data_coll\n .rel\n .as_ref()\n .ok_or_else(invariance_violation)?;\n let analyzed_src = &analyzed_rel.source;\n let analyzed_tgt = &analyzed_rel.target;\n\n let (src_key_field_params, src_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n SRC_KEY_PARAM_PREFIX,\n analyzed_src.schema.key_fields.iter().map(|f| &f.name),\n );\n let (tgt_key_field_params, tgt_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n TGT_KEY_PARAM_PREFIX,\n analyzed_tgt.schema.key_fields.iter().map(|f| &f.name),\n );\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_src:{src_node_label} {src_key_fields_literal})\n {optional_set_src_props}\n\n MERGE (new_tgt:{tgt_node_label} {tgt_key_fields_literal})\n {optional_set_tgt_props}\n\n MERGE (new_src)-[new_rel:{rel_type} {key_fields_literal}]->(new_tgt)\n {optional_set_rel_props}\n\n FINISH\n \",\n src_node_label = rel_spec.source.label,\n optional_set_src_props = if analyzed_src.has_value_fields() {\n format!(\"SET new_src += ${SRC_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n tgt_node_label = rel_spec.target.label,\n optional_set_tgt_props = if analyzed_tgt.has_value_fields() {\n format!(\"SET new_tgt += ${TGT_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n rel_type = rel_spec.rel_type,\n optional_set_rel_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\"SET new_rel += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 1,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: true,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params,\n tgt_key_field_params,\n }\n }\n };\n Ok(result)\n }\n\n fn bind_key_field_params<'a>(\n query: neo4rs::Query,\n params: &[String],\n type_val: impl Iterator,\n ) -> Result {\n let mut query = query;\n for (i, (typ, val)) in type_val.enumerate() {\n query = query.param(¶ms[i], value_to_bolt(val, typ)?);\n }\n Ok(query)\n }\n\n fn bind_rel_key_field_params(\n &self,\n query: neo4rs::Query,\n val: &KeyValue,\n ) -> Result {\n let mut query = query;\n for (i, val) in val\n .fields_iter(self.analyzed_data_coll.schema.key_fields.len())?\n .enumerate()\n {\n query = query.param(\n &self.key_field_params[i],\n key_to_bolt(\n val,\n &self.analyzed_data_coll.schema.key_fields[i].value_type.typ,\n )?,\n );\n }\n Ok(query)\n }\n\n fn add_upsert_queries(\n &self,\n upsert: &ExportTargetUpsertEntry,\n queries: &mut Vec,\n ) -> Result<()> {\n if self.delete_before_upsert {\n queries.push(\n 
self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), &upsert.key)?,\n );\n }\n\n let value = &upsert.value;\n let mut query =\n self.bind_rel_key_field_params(neo4rs::query(&self.insert_cypher), &upsert.key)?;\n\n if let Some(analyzed_rel) = &self.analyzed_data_coll.rel {\n let bind_params = |query: neo4rs::Query,\n analyzed: &AnalyzedGraphElementFieldMapping,\n key_field_params: &[String]|\n -> Result {\n let mut query = Self::bind_key_field_params(\n query,\n key_field_params,\n std::iter::zip(\n analyzed.schema.key_fields.iter(),\n analyzed.fields_input_idx.key.iter(),\n )\n .map(|(f, field_idx)| (&f.value_type.typ, &value.fields[*field_idx])),\n )?;\n if analyzed.has_value_fields() {\n query = query.param(\n SRC_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &analyzed.schema.value_fields,\n &analyzed.fields_input_idx.value,\n value,\n )?,\n );\n }\n Ok(query)\n };\n query = bind_params(query, &analyzed_rel.source, &self.src_key_field_params)?;\n query = bind_params(query, &analyzed_rel.target, &self.tgt_key_field_params)?;\n }\n\n if !self.analyzed_data_coll.value_fields_input_idx.is_empty() {\n query = query.param(\n CORE_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &self.analyzed_data_coll.schema.value_fields,\n &self.analyzed_data_coll.value_fields_input_idx,\n value,\n )?,\n );\n }\n queries.push(query);\n Ok(())\n }\n\n fn add_delete_queries(\n &self,\n delete_key: &value::KeyValue,\n queries: &mut Vec,\n ) -> Result<()> {\n queries\n .push(self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), delete_key)?);\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SetupState {\n key_field_names: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n dependent_node_labels: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n sub_components: Vec,\n}\n\nimpl SetupState {\n fn new(\n schema: &GraphElementSchema,\n index_options: &IndexOptions,\n dependent_node_labels: Vec,\n ) -> 
Result {\n let key_field_names: Vec =\n schema.key_fields.iter().map(|f| f.name.clone()).collect();\n let mut sub_components = vec![];\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::KeyConstraint {\n field_names: key_field_names.clone(),\n },\n });\n let value_field_types = schema\n .value_fields\n .iter()\n .map(|f| (f.name.as_str(), &f.value_type.typ))\n .collect::>();\n for index_def in index_options.vector_indexes.iter() {\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::from_vector_index_def(\n index_def,\n value_field_types\n .get(index_def.field_name.as_str())\n .ok_or_else(|| {\n api_error!(\n \"Unknown field name for vector index: {}\",\n index_def.field_name\n )\n })?,\n )?,\n });\n }\n Ok(Self {\n key_field_names,\n dependent_node_labels,\n sub_components,\n })\n }\n\n fn check_compatible(&self, existing: &Self) -> SetupStateCompatibility {\n if self.key_field_names == existing.key_field_names {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n }\n }\n}\n\nimpl IntoIterator for SetupState {\n type Item = ComponentState;\n type IntoIter = std::vec::IntoIter;\n\n fn into_iter(self) -> Self::IntoIter {\n self.sub_components.into_iter()\n }\n}\n#[derive(Debug, Default)]\nstruct DataClearAction {\n dependent_node_labels: Vec,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\nenum ComponentKind {\n KeyConstraint,\n VectorIndex,\n}\n\nimpl ComponentKind {\n fn describe(&self) -> &str {\n match self {\n ComponentKind::KeyConstraint => \"KEY CONSTRAINT\",\n ComponentKind::VectorIndex => \"VECTOR INDEX\",\n }\n }\n}\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ComponentKey {\n kind: ComponentKind,\n name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nenum IndexDef {\n KeyConstraint {\n field_names: Vec,\n },\n VectorIndex {\n field_name: String,\n metric: 
spec::VectorSimilarityMetric,\n vector_size: usize,\n },\n}\n\nimpl IndexDef {\n fn from_vector_index_def(\n index_def: &spec::VectorIndexDef,\n field_typ: &schema::ValueType,\n ) -> Result {\n Ok(Self::VectorIndex {\n field_name: index_def.field_name.clone(),\n vector_size: (match field_typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(schema)) => {\n schema.dimension\n }\n _ => None,\n })\n .ok_or_else(|| {\n api_error!(\"Vector index field must be a vector with fixed dimension\")\n })?,\n metric: index_def.metric,\n })\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\npub struct ComponentState {\n object_label: ElementType,\n index_def: IndexDef,\n}\n\nimpl components::State for ComponentState {\n fn key(&self) -> ComponentKey {\n let prefix = match &self.object_label {\n ElementType::Relationship(_) => \"r\",\n ElementType::Node(_) => \"n\",\n };\n let label = self.object_label.label();\n match &self.index_def {\n IndexDef::KeyConstraint { .. } => ComponentKey {\n kind: ComponentKind::KeyConstraint,\n name: format!(\"{prefix}__{label}__key\"),\n },\n IndexDef::VectorIndex {\n field_name, metric, ..\n } => ComponentKey {\n kind: ComponentKind::VectorIndex,\n name: format!(\"{prefix}__{label}__{field_name}__{metric}__vidx\"),\n },\n }\n }\n}\n\npub struct SetupComponentOperator {\n graph_pool: Arc,\n conn_spec: ConnectionSpec,\n}\n\n#[async_trait]\nimpl components::SetupOperator for SetupComponentOperator {\n type Key = ComponentKey;\n type State = ComponentState;\n type SetupState = SetupState;\n type Context = ();\n\n fn describe_key(&self, key: &Self::Key) -> String {\n format!(\"{} {}\", key.kind.describe(), key.name)\n }\n\n fn describe_state(&self, state: &Self::State) -> String {\n let key_desc = self.describe_key(&state.key());\n let label = state.object_label.label();\n match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n format!(\"{key_desc} ON {label} (key: {})\", field_names.join(\", \"))\n 
}\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n format!(\n \"{key_desc} ON {label} (field_name: {field_name}, vector_size: {vector_size}, metric: {metric})\",\n )\n }\n }\n }\n\n fn is_up_to_date(&self, current: &ComponentState, desired: &ComponentState) -> bool {\n current == desired\n }\n\n async fn create(&self, state: &ComponentState, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let key = state.key();\n let qualifier = CORE_ELEMENT_MATCHER_VAR;\n let matcher = state.object_label.matcher(qualifier);\n let query = neo4rs::query(&match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n let key_type = match &state.object_label {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"RELATIONSHIP\",\n };\n format!(\n \"CREATE CONSTRAINT {name} IF NOT EXISTS FOR {matcher} REQUIRE {field_names} IS {key_type} KEY\",\n name = key.name,\n field_names = build_composite_field_names(qualifier, field_names),\n )\n }\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n formatdoc! 
{\"\n CREATE VECTOR INDEX {name} IF NOT EXISTS\n FOR {matcher} ON {qualifier}.{field_name}\n OPTIONS {{\n indexConfig: {{\n `vector.dimensions`: {vector_size},\n `vector.similarity_function`: '{metric}'\n }}\n }}\",\n name = key.name,\n }\n }\n });\n Ok(graph.run(query).await?)\n }\n\n async fn delete(&self, key: &ComponentKey, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let query = neo4rs::query(&format!(\n \"DROP {kind} {name} IF EXISTS\",\n kind = match key.kind {\n ComponentKind::KeyConstraint => \"CONSTRAINT\",\n ComponentKind::VectorIndex => \"INDEX\",\n },\n name = key.name,\n ));\n Ok(graph.run(query).await?)\n }\n}\n\nfn build_composite_field_names(qualifier: &str, field_names: &[String]) -> String {\n let strs = field_names\n .iter()\n .map(|name| format!(\"{qualifier}.{name}\"))\n .join(\", \");\n if field_names.len() == 1 {\n strs\n } else {\n format!(\"({strs})\")\n }\n}\n#[derive(Debug)]\npub struct GraphElementDataSetupStatus {\n data_clear: Option,\n change_type: SetupChangeType,\n}\n\nimpl GraphElementDataSetupStatus {\n fn new(desired_state: Option<&SetupState>, existing: &CombinedState) -> Self {\n let mut data_clear: Option = None;\n for v in existing.possible_versions() {\n if desired_state.as_ref().is_none_or(|desired| {\n desired.check_compatible(v) == SetupStateCompatibility::NotCompatible\n }) {\n data_clear\n .get_or_insert_default()\n .dependent_node_labels\n .extend(v.dependent_node_labels.iter().cloned());\n }\n }\n\n let change_type = match (desired_state, existing.possible_versions().next()) {\n (Some(_), Some(_)) => {\n if data_clear.is_none() {\n SetupChangeType::NoChange\n } else {\n SetupChangeType::Update\n }\n }\n (Some(_), None) => SetupChangeType::Create,\n (None, Some(_)) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n };\n\n Self {\n data_clear,\n change_type,\n }\n }\n}\n\nimpl ResourceSetupStatus for GraphElementDataSetupStatus {\n 
fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if let Some(data_clear) = &self.data_clear {\n let mut desc = \"Clear data\".to_string();\n if !data_clear.dependent_node_labels.is_empty() {\n write!(\n &mut desc,\n \"; dependents {}\",\n data_clear\n .dependent_node_labels\n .iter()\n .map(|l| format!(\"{}\", ElementType::Node(l.clone())))\n .join(\", \")\n )\n .unwrap();\n }\n result.push(setup::ChangeDescription::Action(desc));\n }\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.change_type\n }\n}\n\nasync fn clear_graph_element_data(\n graph: &Graph,\n key: &Neo4jGraphElement,\n is_self_contained: bool,\n) -> Result<()> {\n let var_name = CORE_ELEMENT_MATCHER_VAR;\n let matcher = key.typ.matcher(var_name);\n let query_string = match key.typ {\n ElementType::Node(_) => {\n let optional_reset_self_contained = if is_self_contained {\n formatdoc! {\"\n WITH {var_name}\n SET {var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n \"}\n } else {\n \"\".to_string()\n };\n formatdoc! {\"\n CALL {{\n MATCH {matcher}\n {optional_reset_self_contained}\n WITH {var_name} WHERE NOT ({var_name})--() DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n ElementType::Relationship(_) => {\n formatdoc! 
{\"\n CALL {{\n MATCH {matcher} WITH {var_name} DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n };\n let delete_query = neo4rs::query(&query_string);\n graph.run(delete_query).await?;\n Ok(())\n}\n\n/// Factory for Neo4j relationships\npub struct Factory {\n graph_pool: Arc,\n}\n\nimpl Factory {\n pub fn new() -> Self {\n Self {\n graph_pool: Arc::default(),\n }\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = (\n GraphElementDataSetupStatus,\n components::SetupStatus,\n );\n type Key = Neo4jGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Neo4j\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Neo4jGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n let data_coll_output = std::iter::zip(data_collections, analyzed_data_colls)\n .map(|(data_coll, analyzed)| {\n let setup_key = Neo4jGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState::new(\n &analyzed.schema,\n &data_coll.index_options,\n analyzed\n .dependent_node_labels()\n .into_iter()\n .map(|s| s.to_string())\n .collect(),\n )?;\n\n let conn_spec = context\n .auth_registry\n .get::(&data_coll.spec.connection)?;\n let factory = self.clone();\n let export_context = async move {\n Ok(Arc::new(ExportContext::new(\n factory.graph_pool.get_graph(&conn_spec).await?,\n 
data_coll.spec,\n analyzed,\n )?))\n }\n .boxed();\n\n Ok(TypedExportDataCollectionBuildOutput {\n export_context,\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>>()?;\n let decl_output = std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state =\n SetupState::new(&graph_elem_schema, &decl.decl.index_options, vec![])?;\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: Neo4jGraphElement,\n desired: Option,\n existing: CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result {\n let conn_spec = flow_instance_ctx\n .auth_registry\n .get::(&key.connection)?;\n let data_status = GraphElementDataSetupStatus::new(desired.as_ref(), &existing);\n let components = components::SetupStatus::create(\n SetupComponentOperator {\n graph_pool: self.graph_pool.clone(),\n conn_spec,\n },\n desired,\n existing,\n )?;\n Ok((data_status, components))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(desired.check_compatible(existing))\n }\n\n fn describe_resource(&self, key: &Neo4jGraphElement) -> Result {\n Ok(format!(\"Neo4j {}\", key.typ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut muts_by_graph = HashMap::new();\n for mut_with_ctx in mutations.iter() {\n muts_by_graph\n .entry(&mut_with_ctx.export_context.connection_ref)\n .or_insert_with(Vec::new)\n .push(mut_with_ctx);\n }\n let retry_options = retryable::RetryOptions::default();\n for muts in muts_by_graph.values_mut() {\n muts.sort_by_key(|m| m.export_context.create_order);\n let graph = &muts[0].export_context.graph;\n retryable::run(\n async || {\n let mut queries = vec![];\n for mut_with_ctx in muts.iter() {\n let export_ctx = 
&mut_with_ctx.export_context;\n for upsert in mut_with_ctx.mutation.upserts.iter() {\n export_ctx.add_upsert_queries(upsert, &mut queries)?;\n }\n }\n for mut_with_ctx in muts.iter().rev() {\n let export_ctx = &mut_with_ctx.export_context;\n for deletion in mut_with_ctx.mutation.deletes.iter() {\n export_ctx.add_delete_queries(&deletion.key, &mut queries)?;\n }\n }\n let mut txn = graph.start_txn().await?;\n txn.run_queries(queries).await?;\n txn.commit().await?;\n retryable::Ok(())\n },\n &retry_options,\n )\n .await\n .map_err(Into::::into)?\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Relationships first, then nodes, as relationships need to be deleted before nodes they referenced.\n let mut relationship_types = IndexSet::<&Neo4jGraphElement>::new();\n let mut node_labels = IndexSet::<&Neo4jGraphElement>::new();\n let mut dependent_node_labels = IndexSet::::new();\n\n let mut components = vec![];\n for change in changes.iter() {\n if let Some(data_clear) = &change.setup_status.0.data_clear {\n match &change.key.typ {\n ElementType::Relationship(_) => {\n relationship_types.insert(&change.key);\n for label in &data_clear.dependent_node_labels {\n dependent_node_labels.insert(Neo4jGraphElement {\n connection: change.key.connection.clone(),\n typ: ElementType::Node(label.clone()),\n });\n }\n }\n ElementType::Node(_) => {\n node_labels.insert(&change.key);\n }\n }\n }\n components.push(&change.setup_status.1);\n }\n\n // Relationships have no dependency, so can be cleared first.\n for rel_type in relationship_types.into_iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(rel_type, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, rel_type, true).await?;\n }\n // Clear standalone nodes, which is simpler than dependent nodes.\n for node_label in node_labels.iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n 
.await?;\n clear_graph_element_data(&graph, node_label, true).await?;\n }\n // Clear dependent nodes if they're not covered by standalone nodes.\n for node_label in dependent_node_labels.iter() {\n if !node_labels.contains(node_label) {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, node_label, false).await?;\n }\n }\n\n apply_component_changes(components, &()).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/row_indexer.rs", "use crate::prelude::*;\n\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, HashSet};\n\nuse super::db_tracking::{self, TrackedTargetKeyInfo, read_source_tracking_info_for_processing};\nuse super::db_tracking_setup;\nuse super::evaluator::{\n EvaluateSourceEntryOutput, SourceRowEvaluationContext, evaluate_source_entry,\n};\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, StoredMemoizationInfo};\nuse super::stats;\n\nuse crate::base::value::{self, FieldValues, KeyValue};\nuse crate::builder::plan::*;\nuse crate::ops::interface::{\n ExportTargetMutation, ExportTargetUpsertEntry, Ordinal, SourceExecutorGetOptions,\n};\nuse crate::utils::db::WriteAction;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\npub fn extract_primary_key(\n primary_key_def: &AnalyzedPrimaryKeyDef,\n record: &FieldValues,\n) -> Result {\n match primary_key_def {\n AnalyzedPrimaryKeyDef::Fields(fields) => {\n KeyValue::from_values(fields.iter().map(|field| &record.fields[*field]))\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]\npub enum SourceVersionKind {\n #[default]\n UnknownLogic,\n DifferentLogic,\n CurrentLogic,\n NonExistence,\n}\n\n#[derive(Debug, Clone, Default)]\npub struct SourceVersion {\n pub ordinal: Ordinal,\n pub kind: SourceVersionKind,\n}\n\nimpl SourceVersion {\n pub fn from_stored(\n stored_ordinal: Option,\n stored_fp: &Option>,\n curr_fp: 
Fingerprint,\n ) -> Self {\n Self {\n ordinal: Ordinal(stored_ordinal),\n kind: match &stored_fp {\n Some(stored_fp) => {\n if stored_fp.as_slice() == curr_fp.0.as_slice() {\n SourceVersionKind::CurrentLogic\n } else {\n SourceVersionKind::DifferentLogic\n }\n }\n None => SourceVersionKind::UnknownLogic,\n },\n }\n }\n\n pub fn from_stored_processing_info(\n info: &db_tracking::SourceTrackingInfoForProcessing,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_stored_precommit_info(\n info: &db_tracking::SourceTrackingInfoForPrecommit,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_current_with_ordinal(ordinal: Ordinal) -> Self {\n Self {\n ordinal,\n kind: SourceVersionKind::CurrentLogic,\n }\n }\n\n pub fn from_current_data(data: &interface::SourceData) -> Self {\n let kind = match &data.value {\n interface::SourceValue::Existence(_) => SourceVersionKind::CurrentLogic,\n interface::SourceValue::NonExistence => SourceVersionKind::NonExistence,\n };\n Self {\n ordinal: data.ordinal,\n kind,\n }\n }\n\n pub fn should_skip(\n &self,\n target: &SourceVersion,\n update_stats: Option<&stats::UpdateStats>,\n ) -> bool {\n // Ordinal indicates monotonic invariance - always respect ordinal order\n // Never process older ordinals to maintain consistency\n let should_skip = match (self.ordinal.0, target.ordinal.0) {\n (Some(existing_ordinal), Some(target_ordinal)) => {\n // Skip if target ordinal is older, or same ordinal with same/older logic version\n existing_ordinal > target_ordinal\n || (existing_ordinal == target_ordinal && self.kind >= target.kind)\n }\n _ => false,\n };\n if should_skip {\n if let Some(update_stats) = update_stats {\n update_stats.num_no_change.inc(1);\n }\n }\n should_skip\n }\n}\n\npub enum SkippedOr {\n Normal(T),\n 
Skipped(SourceVersion),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\nstruct TargetKeyPair {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Default)]\nstruct TrackingInfoForTarget<'a> {\n export_op: Option<&'a AnalyzedExportOp>,\n\n // Existing keys info. Keyed by target key.\n // Will be removed after new rows for the same key are added into `new_staging_keys_info` and `mutation.upserts`,\n // hence all remaining ones are to be deleted.\n existing_staging_keys_info: HashMap)>>,\n existing_keys_info: HashMap)>>,\n\n // New keys info for staging.\n new_staging_keys_info: Vec,\n\n // Mutation to apply to the target storage.\n mutation: ExportTargetMutation,\n}\n\n#[derive(Debug)]\nstruct PrecommitData<'a> {\n evaluate_output: &'a EvaluateSourceEntryOutput,\n memoization_info: &'a StoredMemoizationInfo,\n}\nstruct PrecommitMetadata {\n source_entry_exists: bool,\n process_ordinal: i64,\n existing_process_ordinal: Option,\n new_target_keys: db_tracking::TrackedTargetKeyForSource,\n}\nstruct PrecommitOutput {\n metadata: PrecommitMetadata,\n target_mutations: HashMap,\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fp: Fingerprint,\n data: Option>,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n export_ops: &[AnalyzedExportOp],\n export_ops_exec_ctx: &[exec_ctx::ExportOpExecutionContext],\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n if let Some(tracking_info) = &tracking_info {\n let existing_source_version =\n SourceVersion::from_stored_precommit_info(tracking_info, logic_fp);\n if 
existing_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_source_version));\n }\n }\n let tracking_info_exists = tracking_info.is_some();\n let process_ordinal = (tracking_info\n .as_ref()\n .map(|info| info.max_process_ordinal)\n .unwrap_or(0)\n + 1)\n .max(process_timestamp.timestamp_millis());\n let existing_process_ordinal = tracking_info.as_ref().and_then(|info| info.process_ordinal);\n\n let mut tracking_info_for_targets = HashMap::::new();\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default()\n .export_op = Some(export_op);\n }\n\n // Collect `tracking_info_for_targets` from existing tracking info.\n if let Some(info) = tracking_info {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n for (target_id, keys_info) in staging_target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_staging_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n\n if let Some(sqlx::types::Json(target_keys)) = info.target_keys {\n for (target_id, keys_info) in target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n }\n }\n\n let mut new_target_keys_info = db_tracking::TrackedTargetKeyForSource::default();\n if let Some(data) = &data {\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), 
export_ops_exec_ctx.iter())\n {\n let target_info = tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default();\n let mut keys_info = Vec::new();\n let collected_values =\n &data.evaluate_output.collected_values[export_op.input.collector_idx as usize];\n for value in collected_values.iter() {\n let primary_key = extract_primary_key(&export_op.primary_key_def, value)?;\n let primary_key_json = serde_json::to_value(&primary_key)?;\n\n let mut field_values = FieldValues {\n fields: Vec::with_capacity(export_op.value_fields.len()),\n };\n for field in export_op.value_fields.iter() {\n field_values\n .fields\n .push(value.fields[*field as usize].clone());\n }\n let additional_key = export_op.export_target_factory.extract_additional_key(\n &primary_key,\n &field_values,\n export_op.export_context.as_ref(),\n )?;\n let target_key_pair = TargetKeyPair {\n key: primary_key_json,\n additional_key,\n };\n let existing_target_keys = target_info.existing_keys_info.remove(&target_key_pair);\n let existing_staging_target_keys = target_info\n .existing_staging_keys_info\n .remove(&target_key_pair);\n\n let curr_fp = if !export_op.value_stable {\n Some(\n Fingerprinter::default()\n .with(&field_values)?\n .into_fingerprint(),\n )\n } else {\n None\n };\n if existing_target_keys\n .as_ref()\n .map(|keys| !keys.is_empty() && keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(false)\n && existing_staging_target_keys\n .map(|keys| keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(true)\n {\n // Already exists, with exactly the same value fingerprint.\n // Nothing need to be changed, except carrying over the existing target keys info.\n let (existing_ordinal, existing_fp) = existing_target_keys\n .ok_or_else(invariance_violation)?\n .into_iter()\n .next()\n .ok_or_else(invariance_violation)?;\n keys_info.push(TrackedTargetKeyInfo {\n key: target_key_pair.key,\n additional_key: target_key_pair.additional_key,\n process_ordinal: existing_ordinal,\n 
fingerprint: existing_fp,\n });\n } else {\n // Entry with new value. Needs to be upserted.\n let tracked_target_key = TrackedTargetKeyInfo {\n key: target_key_pair.key.clone(),\n additional_key: target_key_pair.additional_key.clone(),\n process_ordinal,\n fingerprint: curr_fp,\n };\n target_info.mutation.upserts.push(ExportTargetUpsertEntry {\n key: primary_key,\n additional_key: target_key_pair.additional_key,\n value: field_values,\n });\n target_info\n .new_staging_keys_info\n .push(tracked_target_key.clone());\n keys_info.push(tracked_target_key);\n }\n }\n new_target_keys_info.push((export_op_exec_ctx.target_id, keys_info));\n }\n }\n\n let mut new_staging_target_keys = db_tracking::TrackedTargetKeyForSource::default();\n let mut target_mutations = HashMap::with_capacity(export_ops.len());\n for (target_id, target_tracking_info) in tracking_info_for_targets.into_iter() {\n let legacy_keys: HashSet = target_tracking_info\n .existing_keys_info\n .into_keys()\n .chain(target_tracking_info.existing_staging_keys_info.into_keys())\n .collect();\n\n let mut new_staging_keys_info = target_tracking_info.new_staging_keys_info;\n // Add tracking info for deletions.\n new_staging_keys_info.extend(legacy_keys.iter().map(|key| TrackedTargetKeyInfo {\n key: key.key.clone(),\n additional_key: key.additional_key.clone(),\n process_ordinal,\n fingerprint: None,\n }));\n new_staging_target_keys.push((target_id, new_staging_keys_info));\n\n if let Some(export_op) = target_tracking_info.export_op {\n let mut mutation = target_tracking_info.mutation;\n mutation.deletes.reserve(legacy_keys.len());\n for legacy_key in legacy_keys.into_iter() {\n let key = value::Value::::from_json(\n legacy_key.key,\n &export_op.primary_key_type,\n )?\n .as_key()?;\n mutation.deletes.push(interface::ExportTargetDeleteEntry {\n key,\n additional_key: legacy_key.additional_key,\n });\n }\n target_mutations.insert(target_id, mutation);\n }\n }\n\n db_tracking::precommit_source_tracking_info(\n 
source_id,\n source_key_json,\n process_ordinal,\n new_staging_target_keys,\n data.as_ref().map(|data| data.memoization_info),\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n\n txn.commit().await?;\n\n Ok(SkippedOr::Normal(PrecommitOutput {\n metadata: PrecommitMetadata {\n source_entry_exists: data.is_some(),\n process_ordinal,\n existing_process_ordinal,\n new_target_keys: new_target_keys_info,\n },\n target_mutations,\n }))\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fingerprint: &[u8],\n precommit_metadata: PrecommitMetadata,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_commit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n let tracking_info_exists = tracking_info.is_some();\n if tracking_info.as_ref().and_then(|info| info.process_ordinal)\n >= Some(precommit_metadata.process_ordinal)\n {\n return Ok(());\n }\n\n let cleaned_staging_target_keys = tracking_info\n .map(|info| {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n staging_target_keys\n .into_iter()\n .filter_map(|(target_id, target_keys)| {\n let cleaned_target_keys: Vec<_> = target_keys\n .into_iter()\n .filter(|key_info| {\n Some(key_info.process_ordinal)\n > precommit_metadata.existing_process_ordinal\n && key_info.process_ordinal != precommit_metadata.process_ordinal\n })\n .collect();\n if !cleaned_target_keys.is_empty() {\n Some((target_id, cleaned_target_keys))\n } else {\n None\n }\n })\n .collect::>()\n })\n .unwrap_or_default();\n if !precommit_metadata.source_entry_exists && cleaned_staging_target_keys.is_empty() {\n // TODO: When we 
support distributed execution in the future, we'll need to leave a tombstone for a while\n // to prevent an earlier update causing the record reappear because of out-of-order processing.\n if tracking_info_exists {\n db_tracking::delete_source_tracking_info(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n }\n } else {\n db_tracking::commit_source_tracking_info(\n source_id,\n source_key_json,\n cleaned_staging_target_keys,\n source_version.ordinal.into(),\n logic_fingerprint,\n precommit_metadata.process_ordinal,\n process_timestamp.timestamp_micros(),\n precommit_metadata.new_target_keys,\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n }\n\n txn.commit().await?;\n\n Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn try_content_hash_optimization(\n source_id: i32,\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n current_hash: &crate::utils::fingerprint::Fingerprint,\n tracking_info: &db_tracking::SourceTrackingInfoForProcessing,\n existing_version: &Option,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result>> {\n // Check if we can use content hash optimization\n if existing_version\n .as_ref()\n .is_none_or(|v| v.kind != SourceVersionKind::CurrentLogic)\n {\n return Ok(None);\n }\n\n if tracking_info\n .max_process_ordinal\n .zip(tracking_info.process_ordinal)\n .is_none_or(|(max_ord, proc_ord)| max_ord != proc_ord)\n {\n return Ok(None);\n }\n\n let existing_hash = tracking_info\n .memoization_info\n .as_ref()\n .and_then(|info| info.0.as_ref())\n .and_then(|stored_info| stored_info.content_hash.as_ref());\n\n if existing_hash != Some(current_hash) {\n return Ok(None);\n }\n\n // Content hash matches - try optimization\n let mut txn = pool.begin().await?;\n\n let current_tracking_info = 
db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n let Some(current_tracking_info) = current_tracking_info else {\n return Ok(None);\n };\n\n // Check 1: Same check as precommit - verify no newer version exists\n let current_source_version = SourceVersion::from_stored_precommit_info(\n ¤t_tracking_info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n if current_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(Some(SkippedOr::Skipped(current_source_version)));\n }\n\n // Check 2: Verify process_ordinal hasn't changed (no concurrent processing)\n let original_process_ordinal = tracking_info.process_ordinal;\n if current_tracking_info.process_ordinal != original_process_ordinal {\n return Ok(None);\n }\n\n // Safe to apply optimization - just update tracking table\n db_tracking::update_source_tracking_ordinal(\n source_id,\n source_key_json,\n source_version.ordinal.0,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n txn.commit().await?;\n update_stats.num_no_change.inc(1);\n Ok(Some(SkippedOr::Normal(())))\n}\n\npub async fn evaluate_source_entry_with_memory(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n options: EvaluationMemoryOptions,\n pool: &PgPool,\n) -> Result> {\n let stored_info = if options.enable_cache || !options.evaluation_only {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n existing_tracking_info\n .and_then(|info| info.memoization_info.map(|info| info.0))\n .flatten()\n } else {\n None\n };\n let memory = EvaluationMemory::new(chrono::Utc::now(), stored_info, options);\n let source_value = src_eval_ctx\n 
.import_op\n .executor\n .get_value(\n src_eval_ctx.key,\n &SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: false,\n },\n )\n .await?\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value not returned\"))?;\n let output = match source_value {\n interface::SourceValue::Existence(source_value) => {\n Some(evaluate_source_entry(src_eval_ctx, source_value, &memory).await?)\n }\n interface::SourceValue::NonExistence => None,\n };\n Ok(output)\n}\n\npub async fn update_source_row(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n source_value: interface::SourceValue,\n source_version: &SourceVersion,\n pool: &PgPool,\n update_stats: &stats::UpdateStats,\n) -> Result> {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let process_time = chrono::Utc::now();\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n\n // Phase 1: Check existing tracking info and apply optimizations\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n let existing_version = match &existing_tracking_info {\n Some(info) => {\n let existing_version = SourceVersion::from_stored_processing_info(\n info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n\n // First check ordinal-based skipping\n if existing_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_version));\n }\n\n Some(existing_version)\n }\n None => None,\n };\n\n // Compute content hash once if needed for both optimization and evaluation\n let current_content_hash = match &source_value {\n interface::SourceValue::Existence(source_value) => Some(\n Fingerprinter::default()\n .with(source_value)?\n .into_fingerprint(),\n ),\n interface::SourceValue::NonExistence => None,\n };\n\n if let (Some(current_hash), Some(existing_tracking_info)) =\n 
(¤t_content_hash, &existing_tracking_info)\n {\n if let Some(optimization_result) = try_content_hash_optimization(\n source_id,\n src_eval_ctx,\n &source_key_json,\n source_version,\n current_hash,\n existing_tracking_info,\n &existing_version,\n &setup_execution_ctx.setup_state.tracking_table,\n update_stats,\n pool,\n )\n .await?\n {\n return Ok(optimization_result);\n }\n }\n\n let (output, stored_mem_info) = {\n let extracted_memoization_info = existing_tracking_info\n .and_then(|info| info.memoization_info)\n .and_then(|info| info.0);\n\n match source_value {\n interface::SourceValue::Existence(source_value) => {\n let evaluation_memory = EvaluationMemory::new(\n process_time,\n extracted_memoization_info,\n EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: false,\n },\n );\n\n let output =\n evaluate_source_entry(src_eval_ctx, source_value, &evaluation_memory).await?;\n let mut stored_info = evaluation_memory.into_stored()?;\n stored_info.content_hash = current_content_hash;\n\n (Some(output), stored_info)\n }\n interface::SourceValue::NonExistence => (None, Default::default()),\n }\n };\n\n // Phase 2 (precommit): Update with the memoization info and stage target keys.\n let precommit_output = precommit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n src_eval_ctx.plan.logic_fingerprint,\n output.as_ref().map(|scope_value| PrecommitData {\n evaluate_output: scope_value,\n memoization_info: &stored_mem_info,\n }),\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n &src_eval_ctx.plan.export_ops,\n &setup_execution_ctx.export_ops,\n update_stats,\n pool,\n )\n .await?;\n let precommit_output = match precommit_output {\n SkippedOr::Normal(output) => output,\n SkippedOr::Skipped(source_version) => return Ok(SkippedOr::Skipped(source_version)),\n };\n\n // Phase 3: Apply changes to the target storage, including upserting new target records and removing existing ones.\n let mut target_mutations = 
precommit_output.target_mutations;\n let apply_futs = src_eval_ctx\n .plan\n .export_op_groups\n .iter()\n .filter_map(|export_op_group| {\n let mutations_w_ctx: Vec<_> = export_op_group\n .op_idx\n .iter()\n .filter_map(|export_op_idx| {\n let export_op = &src_eval_ctx.plan.export_ops[*export_op_idx];\n target_mutations\n .remove(&setup_execution_ctx.export_ops[*export_op_idx].target_id)\n .filter(|m| !m.is_empty())\n .map(|mutation| interface::ExportTargetMutationWithContext {\n mutation,\n export_context: export_op.export_context.as_ref(),\n })\n })\n .collect();\n (!mutations_w_ctx.is_empty()).then(|| {\n export_op_group\n .target_factory\n .apply_mutation(mutations_w_ctx)\n })\n });\n\n // TODO: Handle errors.\n try_join_all(apply_futs).await?;\n\n // Phase 4: Update the tracking record.\n commit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n &src_eval_ctx.plan.logic_fingerprint.0,\n precommit_output.metadata,\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n if let Some(existing_version) = existing_version {\n if output.is_some() {\n if !source_version.ordinal.is_available()\n || source_version.ordinal != existing_version.ordinal\n {\n update_stats.num_updates.inc(1);\n } else {\n update_stats.num_reprocesses.inc(1);\n }\n } else {\n update_stats.num_deletions.inc(1);\n }\n } else if output.is_some() {\n update_stats.num_insertions.inc(1);\n }\n\n Ok(SkippedOr::Normal(()))\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_github_actions_scenario_ordinal_behavior() {\n // Test ordinal-based behavior - should_skip only cares about ordinal monotonic invariance\n // Content hash optimization is handled at update_source_row level\n\n let processed_version = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // GitHub Actions checkout: timestamp changes but content same\n let after_checkout_version = 
SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip at should_skip level (ordinal is newer - monotonic invariance)\n // Content hash optimization happens at update_source_row level to update only tracking\n assert!(!processed_version.should_skip(&after_checkout_version, None));\n\n // Reverse case: if we somehow get an older ordinal, always skip\n assert!(after_checkout_version.should_skip(&processed_version, None));\n\n // Now simulate actual content change\n let content_changed_version = SourceVersion {\n ordinal: Ordinal(Some(3000)), // Even newer timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip processing (ordinal is newer)\n assert!(!processed_version.should_skip(&content_changed_version, None));\n }\n\n #[test]\n fn test_content_hash_computation() {\n use crate::base::value::{BasicValue, FieldValues, Value};\n use crate::utils::fingerprint::Fingerprinter;\n\n // Test that content hash is computed correctly from source data\n let source_data1 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data2 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data3 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"World\".into())), // Different content\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let hash1 = Fingerprinter::default()\n .with(&source_data1)\n .unwrap()\n .into_fingerprint();\n\n let hash2 = Fingerprinter::default()\n .with(&source_data2)\n .unwrap()\n .into_fingerprint();\n\n let hash3 = Fingerprinter::default()\n .with(&source_data3)\n .unwrap()\n .into_fingerprint();\n\n // Same content should produce same hash\n assert_eq!(hash1, hash2);\n\n // Different content should produce different hash\n assert_ne!(hash1, 
hash3);\n assert_ne!(hash2, hash3);\n }\n\n #[test]\n fn test_github_actions_content_hash_optimization_requirements() {\n // This test documents the exact requirements for GitHub Actions scenario\n // where file modification times change but content remains the same\n\n use crate::utils::fingerprint::Fingerprinter;\n\n // Simulate file content that remains the same across GitHub Actions checkout\n let file_content = \"const hello = 'world';\\nexport default hello;\";\n\n // Hash before checkout (original file)\n let hash_before_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Hash after checkout (same content, different timestamp)\n let hash_after_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Content hashes must be identical for optimization to work\n assert_eq!(\n hash_before_checkout, hash_after_checkout,\n \"Content hash optimization requires identical hashes for same content\"\n );\n\n // Test with slightly different content (should produce different hashes)\n let modified_content = \"const hello = 'world!';\\nexport default hello;\"; // Added !\n let hash_modified = Fingerprinter::default()\n .with(&modified_content)\n .unwrap()\n .into_fingerprint();\n\n assert_ne!(\n hash_before_checkout, hash_modified,\n \"Different content should produce different hashes\"\n );\n }\n\n #[test]\n fn test_github_actions_ordinal_behavior_with_content_optimization() {\n // Test the complete GitHub Actions scenario:\n // 1. File processed with ordinal=1000, content_hash=ABC\n // 2. GitHub Actions checkout: ordinal=2000, content_hash=ABC (same content)\n // 3. 
Should use content hash optimization (update only tracking, skip evaluation)\n\n let original_processing = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original file timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n let after_github_checkout = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Step 1: Ordinal check should NOT skip (newer ordinal means potential processing needed)\n assert!(\n !original_processing.should_skip(&after_github_checkout, None),\n \"GitHub Actions: newer ordinal should not be skipped at ordinal level\"\n );\n\n // Step 2: Content hash optimization should trigger when content is same\n // This is tested in the integration level - the optimization path should:\n // - Compare content hashes\n // - If same: update only tracking info (process_ordinal, process_time)\n // - Skip expensive evaluation and target storage updates\n\n // Step 3: After optimization, tracking shows the new ordinal\n let after_optimization = SourceVersion {\n ordinal: Ordinal(Some(2000)), // Updated to new ordinal\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Future requests with same ordinal should be skipped\n assert!(\n after_optimization.should_skip(&after_github_checkout, None),\n \"After optimization, same ordinal should be skipped\"\n );\n }\n}\n"], ["/cocoindex/src/execution/live_updater.rs", "use crate::{execution::stats::UpdateStats, prelude::*};\n\nuse super::stats;\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse tokio::{sync::watch, task::JoinSet, time::MissedTickBehavior};\n\npub struct FlowLiveUpdaterUpdates {\n pub active_sources: Vec,\n pub updated_sources: Vec,\n}\nstruct FlowLiveUpdaterStatus {\n pub active_source_idx: BTreeSet,\n pub source_updates_num: Vec,\n}\n\nstruct UpdateReceiveState {\n status_rx: watch::Receiver,\n last_num_source_updates: Vec,\n is_done: bool,\n}\n\npub struct FlowLiveUpdater {\n flow_ctx: Arc,\n join_set: 
Mutex>>>,\n stats_per_task: Vec>,\n recv_state: tokio::sync::Mutex,\n num_remaining_tasks_rx: watch::Receiver,\n\n // Hold tx to avoid dropping the sender.\n _status_tx: watch::Sender,\n _num_remaining_tasks_tx: watch::Sender,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct FlowLiveUpdaterOptions {\n /// If true, the updater will keep refreshing the index.\n /// Otherwise, it will only apply changes from the source up to the current time.\n pub live_mode: bool,\n\n /// If true, stats will be printed to the console.\n pub print_stats: bool,\n}\n\nconst REPORT_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);\n\nstruct SharedAckFn Result<()>> {\n count: usize,\n ack_fn: Option,\n}\n\nimpl Result<()>> SharedAckFn {\n fn new(count: usize, ack_fn: AckAsyncFn) -> Self {\n Self {\n count,\n ack_fn: Some(ack_fn),\n }\n }\n\n async fn ack(v: &Mutex) -> Result<()> {\n let ack_fn = {\n let mut v = v.lock().unwrap();\n v.count -= 1;\n if v.count > 0 { None } else { v.ack_fn.take() }\n };\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n Ok(())\n }\n}\n\nstruct SourceUpdateTask {\n source_idx: usize,\n\n flow: Arc,\n plan: Arc,\n execution_ctx: Arc>,\n source_update_stats: Arc,\n pool: PgPool,\n options: FlowLiveUpdaterOptions,\n\n status_tx: watch::Sender,\n num_remaining_tasks_tx: watch::Sender,\n}\n\nimpl Drop for SourceUpdateTask {\n fn drop(&mut self) {\n self.status_tx.send_modify(|update| {\n update.active_source_idx.remove(&self.source_idx);\n });\n self.num_remaining_tasks_tx.send_modify(|update| {\n *update -= 1;\n });\n }\n}\n\nimpl SourceUpdateTask {\n async fn run(self) -> Result<()> {\n let source_idx = self.source_idx;\n let source_context = self\n .execution_ctx\n .get_source_indexing_context(&self.flow, source_idx, &self.pool)\n .await?;\n\n let import_op = &self.plan.import_ops[source_idx];\n\n let report_stats = |stats: &stats::UpdateStats, kind: &str| {\n self.source_update_stats.merge(stats);\n if 
self.options.print_stats {\n println!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n } else {\n trace!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n }\n };\n\n let mut futs: Vec>> = Vec::new();\n\n // Deal with change streams.\n if self.options.live_mode {\n if let Some(change_stream) = import_op.executor.change_stream().await? {\n let change_stream_stats = Arc::new(stats::UpdateStats::default());\n futs.push(\n {\n let change_stream_stats = change_stream_stats.clone();\n let pool = self.pool.clone();\n let status_tx = self.status_tx.clone();\n async move {\n let mut change_stream = change_stream;\n let retry_options = retryable::RetryOptions {\n max_retries: None,\n initial_backoff: std::time::Duration::from_secs(5),\n max_backoff: std::time::Duration::from_secs(60),\n };\n loop {\n // Workaround as AsyncFnMut isn't mature yet.\n // Should be changed to use AsyncFnMut once it is.\n let change_stream = tokio::sync::Mutex::new(&mut change_stream);\n let change_msg = retryable::run(\n || async {\n let mut change_stream = change_stream.lock().await;\n change_stream\n .next()\n .await\n .transpose()\n .map_err(retryable::Error::always_retryable)\n },\n &retry_options,\n )\n .await?;\n let change_msg = if let Some(change_msg) = change_msg {\n change_msg\n } else {\n break;\n };\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n let ack_fn = {\n let status_tx = status_tx.clone();\n let update_stats = update_stats.clone();\n let change_stream_stats = change_stream_stats.clone();\n async move || {\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n change_stream_stats.merge(&update_stats);\n }\n if let Some(ack_fn) = change_msg.ack_fn {\n ack_fn().await\n } else {\n Ok(())\n }\n }\n };\n let shared_ack_fn = Arc::new(Mutex::new(SharedAckFn::new(\n change_msg.changes.iter().len(),\n ack_fn,\n )));\n for 
change in change_msg.changes {\n let shared_ack_fn = shared_ack_fn.clone();\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n tokio::spawn(source_context.clone().process_source_key(\n change.key,\n change.data,\n update_stats.clone(),\n concur_permit,\n Some(move || async move {\n SharedAckFn::ack(&shared_ack_fn).await\n }),\n pool.clone(),\n ));\n }\n }\n Ok(())\n }\n }\n .boxed(),\n );\n\n futs.push(\n async move {\n let mut interval = tokio::time::interval(REPORT_INTERVAL);\n let mut last_change_stream_stats: UpdateStats =\n change_stream_stats.as_ref().clone();\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n let curr_change_stream_stats = change_stream_stats.as_ref().clone();\n let delta = curr_change_stream_stats.delta(&last_change_stream_stats);\n if delta.has_any_change() {\n report_stats(&delta, \"change stream\");\n last_change_stream_stats = curr_change_stream_stats;\n }\n }\n }\n .boxed(),\n );\n }\n }\n\n // The main update loop.\n futs.push({\n let status_tx = self.status_tx.clone();\n let pool = self.pool.clone();\n let live_mode = self.options.live_mode;\n async move {\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"batch update\");\n\n if let (true, Some(refresh_interval)) =\n (live_mode, import_op.refresh_options.refresh_interval)\n {\n let mut interval = tokio::time::interval(refresh_interval);\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n 
status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"interval refresh\");\n }\n }\n Ok(())\n }\n .boxed()\n });\n\n let join_result = try_join_all(futs).await;\n if let Err(err) = join_result {\n error!(\"Error in source `{}`: {:?}\", import_op.name, err);\n return Err(err);\n }\n Ok(())\n }\n}\n\nimpl FlowLiveUpdater {\n pub async fn start(\n flow_ctx: Arc,\n pool: &PgPool,\n options: FlowLiveUpdaterOptions,\n ) -> Result {\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let execution_ctx = Arc::new(flow_ctx.use_owned_execution_ctx().await?);\n\n let (status_tx, status_rx) = watch::channel(FlowLiveUpdaterStatus {\n active_source_idx: BTreeSet::from_iter(0..plan.import_ops.len()),\n source_updates_num: vec![0; plan.import_ops.len()],\n });\n\n let (num_remaining_tasks_tx, num_remaining_tasks_rx) =\n watch::channel(plan.import_ops.len());\n\n let mut join_set = JoinSet::new();\n let mut stats_per_task = Vec::new();\n\n for source_idx in 0..plan.import_ops.len() {\n let source_update_stats = Arc::new(stats::UpdateStats::default());\n let source_update_task = SourceUpdateTask {\n source_idx,\n flow: flow_ctx.flow.clone(),\n plan: plan.clone(),\n execution_ctx: execution_ctx.clone(),\n source_update_stats: source_update_stats.clone(),\n pool: pool.clone(),\n options: options.clone(),\n status_tx: status_tx.clone(),\n num_remaining_tasks_tx: num_remaining_tasks_tx.clone(),\n };\n join_set.spawn(source_update_task.run());\n stats_per_task.push(source_update_stats);\n }\n Ok(Self {\n flow_ctx,\n join_set: Mutex::new(Some(join_set)),\n stats_per_task,\n recv_state: tokio::sync::Mutex::new(UpdateReceiveState {\n status_rx,\n last_num_source_updates: vec![0; plan.import_ops.len()],\n is_done: false,\n }),\n num_remaining_tasks_rx,\n\n _status_tx: status_tx,\n _num_remaining_tasks_tx: num_remaining_tasks_tx,\n })\n }\n\n pub async fn wait(&self) -> Result<()> {\n {\n let mut rx = 
self.num_remaining_tasks_rx.clone();\n rx.wait_for(|v| *v == 0).await?;\n }\n\n let Some(mut join_set) = self.join_set.lock().unwrap().take() else {\n return Ok(());\n };\n while let Some(task_result) = join_set.join_next().await {\n match task_result {\n Ok(Ok(_)) => {}\n Ok(Err(err)) => {\n return Err(err);\n }\n Err(err) if err.is_cancelled() => {}\n Err(err) => {\n return Err(err.into());\n }\n }\n }\n Ok(())\n }\n\n pub fn abort(&self) {\n let mut join_set = self.join_set.lock().unwrap();\n if let Some(join_set) = &mut *join_set {\n join_set.abort_all();\n }\n }\n\n pub fn index_update_info(&self) -> stats::IndexUpdateInfo {\n stats::IndexUpdateInfo {\n sources: std::iter::zip(\n self.flow_ctx.flow.flow_instance.import_ops.iter(),\n self.stats_per_task.iter(),\n )\n .map(|(import_op, stats)| stats::SourceUpdateInfo {\n source_name: import_op.name.clone(),\n stats: stats.as_ref().clone(),\n })\n .collect(),\n }\n }\n\n pub async fn next_status_updates(&self) -> Result {\n let mut recv_state = self.recv_state.lock().await;\n let recv_state = &mut *recv_state;\n\n if recv_state.is_done {\n return Ok(FlowLiveUpdaterUpdates {\n active_sources: vec![],\n updated_sources: vec![],\n });\n }\n\n recv_state.status_rx.changed().await?;\n let status = recv_state.status_rx.borrow_and_update();\n let updates = FlowLiveUpdaterUpdates {\n active_sources: status\n .active_source_idx\n .iter()\n .map(|idx| {\n self.flow_ctx.flow.flow_instance.import_ops[*idx]\n .name\n .clone()\n })\n .collect(),\n updated_sources: status\n .source_updates_num\n .iter()\n .enumerate()\n .filter_map(|(idx, num_updates)| {\n if num_updates > &recv_state.last_num_source_updates[idx] {\n Some(\n self.flow_ctx.flow.flow_instance.import_ops[idx]\n .name\n .clone(),\n )\n } else {\n None\n }\n })\n .collect(),\n };\n recv_state.last_num_source_updates = status.source_updates_num.clone();\n if status.active_source_idx.is_empty() {\n recv_state.is_done = true;\n }\n Ok(updates)\n }\n}\n"], 
["/cocoindex/src/ops/targets/postgres.rs", "use crate::prelude::*;\n\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::base::spec::{self, *};\nuse crate::ops::sdk::*;\nuse crate::settings::DatabaseConnectionSpec;\nuse async_trait::async_trait;\nuse indexmap::{IndexMap, IndexSet};\nuse itertools::Itertools;\nuse serde::Serialize;\nuse sqlx::PgPool;\nuse sqlx::postgres::types::PgRange;\nuse std::ops::Bound;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n database: Option>,\n table_name: Option,\n}\nconst BIND_LIMIT: usize = 65535;\n\nfn key_value_fields_iter<'a>(\n key_fields_schema: &[FieldSchema],\n key_value: &'a KeyValue,\n) -> Result<&'a [KeyValue]> {\n let slice = if key_fields_schema.len() == 1 {\n std::slice::from_ref(key_value)\n } else {\n match key_value {\n KeyValue::Struct(fields) => fields,\n _ => bail!(\"expect struct key value\"),\n }\n };\n Ok(slice)\n}\n\nfn convertible_to_pgvector(vec_schema: &VectorTypeSchema) -> bool {\n if vec_schema.dimension.is_some() {\n matches!(\n *vec_schema.element_type,\n BasicValueType::Float32 | BasicValueType::Float64 | BasicValueType::Int64\n )\n } else {\n false\n }\n}\n\nfn bind_key_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n key_value: &'arg KeyValue,\n) -> Result<()> {\n match key_value {\n KeyValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Str(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Bool(v) => {\n builder.push_bind(v);\n }\n KeyValue::Int64(v) => {\n builder.push_bind(v);\n }\n KeyValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n KeyValue::Uuid(v) => {\n builder.push_bind(v);\n }\n KeyValue::Date(v) => {\n builder.push_bind(v);\n }\n KeyValue::Struct(fields) => {\n builder.push_bind(sqlx::types::Json(fields));\n }\n }\n Ok(())\n}\n\nfn bind_value_field<'arg>(\n 
builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n field_schema: &'arg FieldSchema,\n value: &'arg Value,\n) -> Result<()> {\n match &value {\n Value::Basic(v) => match v {\n BasicValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Str(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Bool(v) => {\n builder.push_bind(v);\n }\n BasicValue::Int64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float32(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n BasicValue::Uuid(v) => {\n builder.push_bind(v);\n }\n BasicValue::Date(v) => {\n builder.push_bind(v);\n }\n BasicValue::Time(v) => {\n builder.push_bind(v);\n }\n BasicValue::LocalDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::OffsetDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::TimeDelta(v) => {\n builder.push_bind(v);\n }\n BasicValue::Json(v) => {\n builder.push_bind(sqlx::types::Json(&**v));\n }\n BasicValue::Vector(v) => match &field_schema.value_type.typ {\n ValueType::Basic(BasicValueType::Vector(vs)) if convertible_to_pgvector(vs) => {\n let vec = v\n .iter()\n .map(|v| {\n Ok(match v {\n BasicValue::Float32(v) => *v,\n BasicValue::Float64(v) => *v as f32,\n BasicValue::Int64(v) => *v as f32,\n v => bail!(\"unexpected vector element type: {}\", v.kind()),\n })\n })\n .collect::>>()?;\n builder.push_bind(pgvector::Vector::from(vec));\n }\n _ => {\n builder.push_bind(sqlx::types::Json(v));\n }\n },\n BasicValue::UnionVariant { .. 
} => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v: value,\n }));\n }\n },\n Value::Null => {\n builder.push(\"NULL\");\n }\n v => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v,\n }));\n }\n };\n Ok(())\n}\n\npub struct ExportContext {\n db_ref: Option>,\n db_pool: PgPool,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n upsert_sql_prefix: String,\n upsert_sql_suffix: String,\n delete_sql_prefix: String,\n}\n\nimpl ExportContext {\n fn new(\n db_ref: Option>,\n db_pool: PgPool,\n table_name: String,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n ) -> Result {\n let key_fields = key_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let all_fields = (key_fields_schema.iter().chain(value_fields_schema.iter()))\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let set_value_fields = value_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\" = EXCLUDED.\\\"{}\\\"\", f.name, f.name))\n .collect::>()\n .join(\", \");\n\n Ok(Self {\n db_ref,\n db_pool,\n upsert_sql_prefix: format!(\"INSERT INTO {table_name} ({all_fields}) VALUES \"),\n upsert_sql_suffix: if value_fields_schema.is_empty() {\n format!(\" ON CONFLICT ({key_fields}) DO NOTHING;\")\n } else {\n format!(\" ON CONFLICT ({key_fields}) DO UPDATE SET {set_value_fields};\")\n },\n delete_sql_prefix: format!(\"DELETE FROM {table_name} WHERE \"),\n key_fields_schema,\n value_fields_schema,\n })\n }\n}\n\nimpl ExportContext {\n async fn upsert(\n &self,\n upserts: &[interface::ExportTargetUpsertEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n let num_parameters = self.key_fields_schema.len() + self.value_fields_schema.len();\n for upsert_chunk in upserts.chunks(BIND_LIMIT / num_parameters) {\n let mut query_builder = sqlx::QueryBuilder::new(&self.upsert_sql_prefix);\n for (i, upsert) in 
upsert_chunk.iter().enumerate() {\n if i > 0 {\n query_builder.push(\",\");\n }\n query_builder.push(\" (\");\n for (j, key_value) in key_value_fields_iter(&self.key_fields_schema, &upsert.key)?\n .iter()\n .enumerate()\n {\n if j > 0 {\n query_builder.push(\", \");\n }\n bind_key_field(&mut query_builder, key_value)?;\n }\n if self.value_fields_schema.len() != upsert.value.fields.len() {\n bail!(\n \"unmatched value length: {} vs {}\",\n self.value_fields_schema.len(),\n upsert.value.fields.len()\n );\n }\n for (schema, value) in self\n .value_fields_schema\n .iter()\n .zip(upsert.value.fields.iter())\n {\n query_builder.push(\", \");\n bind_value_field(&mut query_builder, schema, value)?;\n }\n query_builder.push(\")\");\n }\n query_builder.push(&self.upsert_sql_suffix);\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n\n async fn delete(\n &self,\n deletions: &[interface::ExportTargetDeleteEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n // TODO: Find a way to batch delete.\n for deletion in deletions.iter() {\n let mut query_builder = sqlx::QueryBuilder::new(\"\");\n query_builder.push(&self.delete_sql_prefix);\n for (i, (schema, value)) in self\n .key_fields_schema\n .iter()\n .zip(key_value_fields_iter(&self.key_fields_schema, &deletion.key)?.iter())\n .enumerate()\n {\n if i > 0 {\n query_builder.push(\" AND \");\n }\n query_builder.push(\"\\\"\");\n query_builder.push(schema.name.as_str());\n query_builder.push(\"\\\"\");\n query_builder.push(\"=\");\n bind_key_field(&mut query_builder, value)?;\n }\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n}\n\n#[derive(Default)]\npub struct Factory {}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TableId {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n database: Option>,\n table_name: String,\n}\n\nimpl std::fmt::Display for TableId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n 
write!(f, \"{}\", self.table_name)?;\n if let Some(database) = &self.database {\n write!(f, \" (database: {database})\")?;\n }\n Ok(())\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SetupState {\n #[serde(flatten)]\n columns: TableColumnsSchema,\n\n vector_indexes: BTreeMap,\n}\n\nimpl SetupState {\n fn new(\n table_id: &TableId,\n key_fields_schema: &[FieldSchema],\n value_fields_schema: &[FieldSchema],\n index_options: &IndexOptions,\n ) -> Self {\n Self {\n columns: TableColumnsSchema {\n key_columns: key_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n value_columns: value_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n },\n vector_indexes: index_options\n .vector_indexes\n .iter()\n .map(|v| (to_vector_index_name(&table_id.table_name, v), v.clone()))\n .collect(),\n }\n }\n\n fn uses_pgvector(&self) -> bool {\n self.columns\n .value_columns\n .iter()\n .any(|(_, value)| match &value {\n ValueType::Basic(BasicValueType::Vector(vec_schema)) => {\n convertible_to_pgvector(vec_schema)\n }\n _ => false,\n })\n }\n}\n\nfn to_column_type_sql(column_type: &ValueType) -> String {\n match column_type {\n ValueType::Basic(basic_type) => match basic_type {\n BasicValueType::Bytes => \"bytea\".into(),\n BasicValueType::Str => \"text\".into(),\n BasicValueType::Bool => \"boolean\".into(),\n BasicValueType::Int64 => \"bigint\".into(),\n BasicValueType::Float32 => \"real\".into(),\n BasicValueType::Float64 => \"double precision\".into(),\n BasicValueType::Range => \"int8range\".into(),\n BasicValueType::Uuid => \"uuid\".into(),\n BasicValueType::Date => \"date\".into(),\n BasicValueType::Time => \"time\".into(),\n BasicValueType::LocalDateTime => \"timestamp\".into(),\n BasicValueType::OffsetDateTime => \"timestamp with time zone\".into(),\n BasicValueType::TimeDelta => \"interval\".into(),\n BasicValueType::Json => \"jsonb\".into(),\n 
BasicValueType::Vector(vec_schema) => {\n if convertible_to_pgvector(vec_schema) {\n format!(\"vector({})\", vec_schema.dimension.unwrap_or(0))\n } else {\n \"jsonb\".into()\n }\n }\n BasicValueType::Union(_) => \"jsonb\".into(),\n },\n _ => \"jsonb\".into(),\n }\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Owned(TableColumnsSchema {\n key_columns: val\n .columns\n .key_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n value_columns: val\n .columns\n .value_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n })\n }\n}\n\n#[derive(Debug)]\npub struct TableSetupAction {\n table_action: TableMainSetupAction,\n indexes_to_delete: IndexSet,\n indexes_to_create: IndexMap,\n}\n\n#[derive(Debug)]\npub struct SetupStatus {\n create_pgvector_extension: bool,\n actions: TableSetupAction,\n vector_as_jsonb_columns: Vec<(String, ValueType)>,\n}\n\nimpl SetupStatus {\n fn new(desired_state: Option, existing: setup::CombinedState) -> Self {\n let table_action =\n TableMainSetupAction::from_states(desired_state.as_ref(), &existing, false);\n let vector_as_jsonb_columns = desired_state\n .as_ref()\n .iter()\n .flat_map(|s| {\n s.columns.value_columns.iter().filter_map(|(name, schema)| {\n if let ValueType::Basic(BasicValueType::Vector(vec_schema)) = schema\n && !convertible_to_pgvector(vec_schema)\n {\n let is_touched = match &table_action.table_upsertion {\n Some(TableUpsertionAction::Create { values, .. 
}) => {\n values.contains_key(name)\n }\n Some(TableUpsertionAction::Update {\n columns_to_upsert, ..\n }) => columns_to_upsert.contains_key(name),\n None => false,\n };\n if is_touched {\n Some((name.clone(), schema.clone()))\n } else {\n None\n }\n } else {\n None\n }\n })\n })\n .collect::>();\n let (indexes_to_delete, indexes_to_create) = desired_state\n .as_ref()\n .map(|desired| {\n (\n existing\n .possible_versions()\n .flat_map(|v| v.vector_indexes.keys())\n .filter(|index_name| !desired.vector_indexes.contains_key(*index_name))\n .cloned()\n .collect::>(),\n desired\n .vector_indexes\n .iter()\n .filter(|(name, def)| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vector_indexes.get(*name) != Some(def))\n })\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect::>(),\n )\n })\n .unwrap_or_default();\n let create_pgvector_extension = desired_state\n .as_ref()\n .map(|s| s.uses_pgvector())\n .unwrap_or(false)\n && !existing.current.map(|s| s.uses_pgvector()).unwrap_or(false);\n\n Self {\n create_pgvector_extension,\n actions: TableSetupAction {\n table_action,\n indexes_to_delete,\n indexes_to_create,\n },\n vector_as_jsonb_columns,\n }\n }\n}\n\nfn to_vector_similarity_metric_sql(metric: VectorSimilarityMetric) -> &'static str {\n match metric {\n VectorSimilarityMetric::CosineSimilarity => \"vector_cosine_ops\",\n VectorSimilarityMetric::L2Distance => \"vector_l2_ops\",\n VectorSimilarityMetric::InnerProduct => \"vector_ip_ops\",\n }\n}\n\nfn to_index_spec_sql(index_spec: &VectorIndexDef) -> Cow<'static, str> {\n format!(\n \"USING hnsw ({} {})\",\n index_spec.field_name,\n to_vector_similarity_metric_sql(index_spec.metric)\n )\n .into()\n}\n\nfn to_vector_index_name(table_name: &str, vector_index_def: &spec::VectorIndexDef) -> String {\n format!(\n \"{}__{}__{}\",\n table_name,\n vector_index_def.field_name,\n to_vector_similarity_metric_sql(vector_index_def.metric)\n )\n}\n\nfn describe_index_spec(index_name: &str, 
index_spec: &VectorIndexDef) -> String {\n format!(\"{} {}\", index_name, to_index_spec_sql(index_spec))\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut descriptions = self.actions.table_action.describe_changes();\n for (column_name, schema) in self.vector_as_jsonb_columns.iter() {\n descriptions.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by pgvector. It will be stored as `jsonb`.\",\n column_name,\n schema\n )));\n }\n if self.create_pgvector_extension {\n descriptions.push(setup::ChangeDescription::Action(\n \"Create pg_vector extension (if not exists)\".to_string(),\n ));\n }\n if !self.actions.indexes_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete indexes from table: {}\",\n self.actions.indexes_to_delete.iter().join(\", \"),\n )));\n }\n if !self.actions.indexes_to_create.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create indexes in table: {}\",\n self.actions\n .indexes_to_create\n .iter()\n .map(|(index_name, index_spec)| describe_index_spec(index_name, index_spec))\n .join(\", \"),\n )));\n }\n descriptions\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n let has_other_update = !self.actions.indexes_to_create.is_empty()\n || !self.actions.indexes_to_delete.is_empty();\n self.actions.table_action.change_type(has_other_update)\n }\n}\n\nimpl SetupStatus {\n async fn apply_change(&self, db_pool: &PgPool, table_name: &str) -> Result<()> {\n if self.actions.table_action.drop_existing {\n sqlx::query(&format!(\"DROP TABLE IF EXISTS {table_name}\"))\n .execute(db_pool)\n .await?;\n }\n if self.create_pgvector_extension {\n sqlx::query(\"CREATE EXTENSION IF NOT EXISTS vector;\")\n .execute(db_pool)\n .await?;\n }\n for index_name in self.actions.indexes_to_delete.iter() {\n let sql = format!(\"DROP INDEX IF EXISTS {index_name}\");\n 
sqlx::query(&sql).execute(db_pool).await?;\n }\n if let Some(table_upsertion) = &self.actions.table_action.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n let mut fields = (keys\n .iter()\n .map(|(name, typ)| format!(\"\\\"{name}\\\" {typ} NOT NULL\")))\n .chain(values.iter().map(|(name, typ)| format!(\"\\\"{name}\\\" {typ}\")));\n let sql = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} ({}, PRIMARY KEY ({}))\",\n fields.join(\", \"),\n keys.keys().join(\", \")\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n for column_name in columns_to_delete.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\"\",\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n for (column_name, column_type) in columns_to_upsert.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\", ADD COLUMN \\\"{column_name}\\\" {column_type}\"\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n }\n }\n }\n for (index_name, index_spec) in self.actions.indexes_to_create.iter() {\n let sql = format!(\n \"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} {}\",\n to_index_spec_sql(index_spec)\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n Ok(())\n }\n}\n\nasync fn get_db_pool(\n db_ref: Option<&spec::AuthEntryReference>,\n auth_registry: &AuthRegistry,\n) -> Result {\n let lib_context = get_lib_context()?;\n let db_conn_spec = db_ref\n .as_ref()\n .map(|db_ref| auth_registry.get(db_ref))\n .transpose()?;\n let db_pool = match db_conn_spec {\n Some(db_conn_spec) => lib_context.db_pools.get_pool(&db_conn_spec).await?,\n None => lib_context.require_builtin_db_pool()?.clone(),\n };\n Ok(db_pool)\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type 
SetupStatus = SetupStatus;\n type Key = TableId;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Postgres\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(TableId, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n let table_id = TableId {\n database: d.spec.database.clone(),\n table_name: d.spec.table_name.unwrap_or_else(|| {\n utils::db::sanitize_identifier(&format!(\n \"{}__{}\",\n context.flow_instance_name, d.name\n ))\n }),\n };\n let setup_state = SetupState::new(\n &table_id,\n &d.key_fields_schema,\n &d.value_fields_schema,\n &d.index_options,\n );\n let table_name = table_id.table_name.clone();\n let db_ref = d.spec.database;\n let auth_registry = context.auth_registry.clone();\n let export_context = Box::pin(async move {\n let db_pool = get_db_pool(db_ref.as_ref(), &auth_registry).await?;\n let export_context = Arc::new(ExportContext::new(\n db_ref,\n db_pool.clone(),\n table_name,\n d.key_fields_schema,\n d.value_fields_schema,\n )?);\n Ok(export_context)\n });\n Ok(TypedExportDataCollectionBuildOutput {\n setup_key: table_id,\n desired_setup_state: setup_state,\n export_context,\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: TableId,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n Ok(SetupStatus::new(desired, existing))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(check_table_compatibility(\n &desired.columns,\n &existing.columns,\n ))\n }\n\n fn describe_resource(&self, key: &TableId) -> Result {\n Ok(format!(\"Postgres table {}\", key.table_name))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mut_groups_by_db_ref = HashMap::new();\n for mutation in mutations.iter() 
{\n mut_groups_by_db_ref\n .entry(mutation.export_context.db_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mut_groups in mut_groups_by_db_ref.values() {\n let db_pool = &mut_groups\n .first()\n .ok_or_else(|| anyhow!(\"empty group\"))?\n .export_context\n .db_pool;\n let mut txn = db_pool.begin().await?;\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .upsert(&mut_group.mutation.upserts, &mut txn)\n .await?;\n }\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .delete(&mut_group.mutation.deletes, &mut txn)\n .await?;\n }\n txn.commit().await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n for change in changes.iter() {\n let db_pool = get_db_pool(change.key.database.as_ref(), &context.auth_registry).await?;\n change\n .setup_status\n .apply_change(&db_pool, &change.key.table_name)\n .await?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/builder/exec_ctx.rs", "use crate::prelude::*;\n\nuse crate::execution::db_tracking_setup;\nuse crate::ops::get_executor_factory;\nuse crate::ops::interface::SetupStateCompatibility;\n\npub struct ImportOpExecutionContext {\n pub source_id: i32,\n}\n\npub struct ExportOpExecutionContext {\n pub target_id: i32,\n}\n\npub struct FlowSetupExecutionContext {\n pub setup_state: setup::FlowSetupState,\n pub import_ops: Vec,\n pub export_ops: Vec,\n}\n\npub struct AnalyzedTargetSetupState {\n pub target_kind: String,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n pub setup_by_user: bool,\n}\n\npub struct AnalyzedSetupState {\n pub targets: Vec,\n pub declarations: Vec,\n}\n\nfn build_import_op_exec_ctx(\n import_field_name: &spec::FieldName,\n import_op_output_type: &schema::EnrichedValueType,\n existing_source_states: Option<&Vec<&setup::SourceSetupState>>,\n metadata: &mut setup::FlowSetupMetadata,\n) -> Result {\n let key_schema_no_attrs = import_op_output_type\n .typ\n 
.key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key\"))?\n .typ\n .without_attrs();\n\n let existing_source_ids = existing_source_states\n .iter()\n .flat_map(|v| v.iter())\n .filter_map(|state| {\n if state.key_schema == key_schema_no_attrs {\n Some(state.source_id)\n } else {\n None\n }\n })\n .collect::>();\n let source_id = if existing_source_ids.len() == 1 {\n existing_source_ids.into_iter().next().unwrap()\n } else {\n if existing_source_ids.len() > 1 {\n warn!(\"Multiple source states with the same key schema found\");\n }\n metadata.last_source_id += 1;\n metadata.last_source_id\n };\n metadata.sources.insert(\n import_field_name.clone(),\n setup::SourceSetupState {\n source_id,\n key_schema: key_schema_no_attrs,\n },\n );\n Ok(ImportOpExecutionContext { source_id })\n}\n\nfn build_target_id(\n analyzed_target_ss: &AnalyzedTargetSetupState,\n existing_target_states: &HashMap<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>,\n flow_setup_state: &mut setup::FlowSetupState,\n) -> Result {\n let interface::ExecutorFactory::ExportTarget(target_factory) =\n get_executor_factory(&analyzed_target_ss.target_kind)?\n else {\n api_bail!(\n \"`{}` is not a export target op\",\n analyzed_target_ss.target_kind\n )\n };\n\n let resource_id = setup::ResourceIdentifier {\n key: analyzed_target_ss.setup_key.clone(),\n target_kind: analyzed_target_ss.target_kind.clone(),\n };\n let existing_target_states = existing_target_states.get(&resource_id);\n let mut compatible_target_ids = HashSet::>::new();\n let mut reusable_schema_version_ids = HashSet::>::new();\n for existing_state in existing_target_states.iter().flat_map(|v| v.iter()) {\n let compatibility =\n if analyzed_target_ss.setup_by_user == existing_state.common.setup_by_user {\n target_factory.check_state_compatibility(\n &analyzed_target_ss.desired_setup_state,\n &existing_state.state,\n )?\n } else {\n SetupStateCompatibility::NotCompatible\n };\n let compatible_target_id = if 
compatibility != SetupStateCompatibility::NotCompatible {\n reusable_schema_version_ids.insert(\n (compatibility == SetupStateCompatibility::Compatible)\n .then_some(existing_state.common.schema_version_id),\n );\n Some(existing_state.common.target_id)\n } else {\n None\n };\n compatible_target_ids.insert(compatible_target_id);\n }\n\n let target_id = if compatible_target_ids.len() == 1 {\n compatible_target_ids.into_iter().next().flatten()\n } else {\n if compatible_target_ids.len() > 1 {\n warn!(\"Multiple target states with the same key schema found\");\n }\n None\n };\n let target_id = target_id.unwrap_or_else(|| {\n flow_setup_state.metadata.last_target_id += 1;\n flow_setup_state.metadata.last_target_id\n });\n let max_schema_version_id = existing_target_states\n .iter()\n .flat_map(|v| v.iter())\n .map(|s| s.common.max_schema_version_id)\n .max()\n .unwrap_or(0);\n let schema_version_id = if reusable_schema_version_ids.len() == 1 {\n reusable_schema_version_ids\n .into_iter()\n .next()\n .unwrap()\n .unwrap_or(max_schema_version_id + 1)\n } else {\n max_schema_version_id + 1\n };\n match flow_setup_state.targets.entry(resource_id) {\n indexmap::map::Entry::Occupied(entry) => {\n api_bail!(\n \"Target resource already exists: kind = {}, key = {}\",\n entry.key().target_kind,\n entry.key().key\n );\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(setup::TargetSetupState {\n common: setup::TargetSetupStateCommon {\n target_id,\n schema_version_id,\n max_schema_version_id: max_schema_version_id.max(schema_version_id),\n setup_by_user: analyzed_target_ss.setup_by_user,\n },\n state: analyzed_target_ss.desired_setup_state.clone(),\n });\n }\n }\n Ok(target_id)\n}\n\npub fn build_flow_setup_execution_context(\n flow_inst: &spec::FlowInstanceSpec,\n data_schema: &schema::FlowSchema,\n analyzed_ss: &AnalyzedSetupState,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result {\n let existing_metadata_versions = || {\n existing_flow_ss\n .iter()\n 
.flat_map(|flow_ss| flow_ss.metadata.possible_versions())\n };\n\n let mut source_states_by_name = HashMap::<&str, Vec<&setup::SourceSetupState>>::new();\n for metadata_version in existing_metadata_versions() {\n for (source_name, state) in metadata_version.sources.iter() {\n source_states_by_name\n .entry(source_name.as_str())\n .or_default()\n .push(state);\n }\n }\n\n let mut target_states_by_name_type =\n HashMap::<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>::new();\n for metadata_version in existing_flow_ss.iter() {\n for (resource_id, target) in metadata_version.targets.iter() {\n target_states_by_name_type\n .entry(resource_id)\n .or_default()\n .extend(target.possible_versions());\n }\n }\n\n let mut setup_state = setup::FlowSetupState:: {\n seen_flow_metadata_version: existing_flow_ss\n .and_then(|flow_ss| flow_ss.seen_flow_metadata_version),\n metadata: setup::FlowSetupMetadata {\n last_source_id: existing_metadata_versions()\n .map(|metadata| metadata.last_source_id)\n .max()\n .unwrap_or(0),\n last_target_id: existing_metadata_versions()\n .map(|metadata| metadata.last_target_id)\n .max()\n .unwrap_or(0),\n sources: BTreeMap::new(),\n },\n tracking_table: db_tracking_setup::TrackingTableSetupState {\n table_name: existing_flow_ss\n .and_then(|flow_ss| {\n flow_ss\n .tracking_table\n .current\n .as_ref()\n .map(|v| v.table_name.clone())\n })\n .unwrap_or_else(|| db_tracking_setup::default_tracking_table_name(&flow_inst.name)),\n version_id: db_tracking_setup::CURRENT_TRACKING_TABLE_VERSION,\n },\n targets: IndexMap::new(),\n };\n\n let import_op_exec_ctx = flow_inst\n .import_ops\n .iter()\n .map(|import_op| {\n let output_type = data_schema\n .root_op_scope\n .op_output_types\n .get(&import_op.name)\n .ok_or_else(invariance_violation)?;\n build_import_op_exec_ctx(\n &import_op.name,\n output_type,\n source_states_by_name.get(&import_op.name.as_str()),\n &mut setup_state.metadata,\n )\n })\n .collect::>>()?;\n\n let export_op_exec_ctx = 
analyzed_ss\n .targets\n .iter()\n .map(|analyzed_target_ss| {\n let target_id = build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n Ok(ExportOpExecutionContext { target_id })\n })\n .collect::>>()?;\n\n for analyzed_target_ss in analyzed_ss.declarations.iter() {\n build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n }\n\n Ok(FlowSetupExecutionContext {\n setup_state,\n import_ops: import_op_exec_ctx,\n export_ops: export_op_exec_ctx,\n })\n}\n"], ["/cocoindex/src/ops/targets/qdrant.rs", "use crate::ops::sdk::*;\nuse crate::prelude::*;\n\nuse std::fmt::Display;\n\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::setup;\nuse qdrant_client::Qdrant;\nuse qdrant_client::qdrant::{\n CreateCollectionBuilder, DeletePointsBuilder, DenseVector, Distance, MultiDenseVector,\n MultiVectorComparator, MultiVectorConfigBuilder, NamedVectors, PointId, PointStruct,\n PointsIdsList, UpsertPointsBuilder, Value as QdrantValue, Vector as QdrantVector,\n VectorParamsBuilder, VectorsConfigBuilder,\n};\n\nconst DEFAULT_VECTOR_SIMILARITY_METRIC: spec::VectorSimilarityMetric =\n spec::VectorSimilarityMetric::CosineSimilarity;\nconst DEFAULT_URL: &str = \"http://localhost:6334/\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n grpc_url: String,\n api_key: Option,\n}\n\n#[derive(Debug, Deserialize, Clone)]\nstruct Spec {\n connection: Option>,\n collection_name: String,\n}\n\n////////////////////////////////////////////////////////////\n// Common\n////////////////////////////////////////////////////////////\n\nstruct FieldInfo {\n field_schema: schema::FieldSchema,\n vector_shape: Option,\n}\n\nenum VectorShape {\n Vector(usize),\n MultiVector(usize),\n}\n\nimpl VectorShape {\n fn vector_size(&self) -> usize {\n match self 
{\n VectorShape::Vector(size) => *size,\n VectorShape::MultiVector(size) => *size,\n }\n }\n\n fn multi_vector_comparator(&self) -> Option {\n match self {\n VectorShape::MultiVector(_) => Some(MultiVectorComparator::MaxSim),\n _ => None,\n }\n }\n}\n\nfn parse_vector_schema_shape(vector_schema: &schema::VectorTypeSchema) -> Option {\n match &*vector_schema.element_type {\n schema::BasicValueType::Float32\n | schema::BasicValueType::Float64\n | schema::BasicValueType::Int64 => vector_schema.dimension.map(VectorShape::Vector),\n\n schema::BasicValueType::Vector(nested_vector_schema) => {\n match parse_vector_schema_shape(nested_vector_schema) {\n Some(VectorShape::Vector(dim)) => Some(VectorShape::MultiVector(dim)),\n _ => None,\n }\n }\n _ => None,\n }\n}\n\nfn parse_vector_shape(typ: &schema::ValueType) -> Option {\n match typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(vector_schema)) => {\n parse_vector_schema_shape(vector_schema)\n }\n _ => None,\n }\n}\n\nfn encode_dense_vector(v: &BasicValue) -> Result {\n let vec = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(|elem| {\n Ok(match elem {\n BasicValue::Float32(f) => *f,\n BasicValue::Float64(f) => *f as f32,\n BasicValue::Int64(i) => *i as f32,\n _ => bail!(\"Unsupported vector type: {:?}\", elem.kind()),\n })\n })\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vec.into())\n}\n\nfn encode_multi_dense_vector(v: &BasicValue) -> Result {\n let vecs = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(encode_dense_vector)\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vecs.into())\n}\n\nfn embedding_metric_to_qdrant(metric: spec::VectorSimilarityMetric) -> Result {\n Ok(match metric {\n spec::VectorSimilarityMetric::CosineSimilarity => Distance::Cosine,\n spec::VectorSimilarityMetric::L2Distance => Distance::Euclid,\n spec::VectorSimilarityMetric::InnerProduct => Distance::Dot,\n 
})\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionKey {\n connection: Option>,\n collection_name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nstruct VectorDef {\n vector_size: usize,\n metric: spec::VectorSimilarityMetric,\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n multi_vector_comparator: Option,\n}\n#[derive(Debug, Clone, Serialize, Deserialize)]\nstruct SetupState {\n #[serde(default)]\n vectors: BTreeMap,\n\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n unsupported_vector_fields: Vec<(String, ValueType)>,\n}\n\n#[derive(Debug)]\nstruct SetupStatus {\n delete_collection: bool,\n add_collection: Option,\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if self.delete_collection {\n result.push(setup::ChangeDescription::Action(\n \"Delete collection\".to_string(),\n ));\n }\n if let Some(add_collection) = &self.add_collection {\n let vector_descriptions = add_collection\n .vectors\n .iter()\n .map(|(name, vector_def)| {\n format!(\n \"{}[{}], {}\",\n name, vector_def.vector_size, vector_def.metric\n )\n })\n .collect::>()\n .join(\"; \");\n result.push(setup::ChangeDescription::Action(format!(\n \"Create collection{}\",\n if vector_descriptions.is_empty() {\n \"\".to_string()\n } else {\n format!(\" with vectors: {vector_descriptions}\")\n }\n )));\n for (name, schema) in add_collection.unsupported_vector_fields.iter() {\n result.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by Qdrant. 
It will be stored in payload.\",\n name, schema\n )));\n }\n }\n result\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n match (self.delete_collection, self.add_collection.is_some()) {\n (false, false) => setup::SetupChangeType::NoChange,\n (false, true) => setup::SetupChangeType::Create,\n (true, false) => setup::SetupChangeType::Delete,\n (true, true) => setup::SetupChangeType::Update,\n }\n }\n}\n\nimpl SetupStatus {\n async fn apply_delete(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if self.delete_collection {\n qdrant_client.delete_collection(collection_name).await?;\n }\n Ok(())\n }\n\n async fn apply_create(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if let Some(add_collection) = &self.add_collection {\n let mut builder = CreateCollectionBuilder::new(collection_name);\n if !add_collection.vectors.is_empty() {\n let mut vectors_config = VectorsConfigBuilder::default();\n for (name, vector_def) in add_collection.vectors.iter() {\n let mut params = VectorParamsBuilder::new(\n vector_def.vector_size as u64,\n embedding_metric_to_qdrant(vector_def.metric)?,\n );\n if let Some(multi_vector_comparator) = &vector_def.multi_vector_comparator {\n params = params.multivector_config(MultiVectorConfigBuilder::new(\n MultiVectorComparator::from_str_name(multi_vector_comparator)\n .ok_or_else(|| {\n anyhow!(\n \"unrecognized multi vector comparator: {}\",\n multi_vector_comparator\n )\n })?,\n ));\n }\n vectors_config.add_named_vector_params(name, params);\n }\n builder = builder.vectors_config(vectors_config);\n }\n qdrant_client.create_collection(builder).await?;\n }\n Ok(())\n }\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n qdrant_client: Arc,\n collection_name: String,\n fields_info: Vec,\n}\n\nimpl ExportContext {\n async fn apply_mutation(&self, mutation: 
ExportTargetMutation) -> Result<()> {\n let mut points: Vec = Vec::with_capacity(mutation.upserts.len());\n for upsert in mutation.upserts.iter() {\n let point_id = key_to_point_id(&upsert.key)?;\n let (payload, vectors) = values_to_payload(&upsert.value.fields, &self.fields_info)?;\n\n points.push(PointStruct::new(point_id, vectors, payload));\n }\n\n if !points.is_empty() {\n self.qdrant_client\n .upsert_points(UpsertPointsBuilder::new(&self.collection_name, points).wait(true))\n .await?;\n }\n\n let ids = mutation\n .deletes\n .iter()\n .map(|deletion| key_to_point_id(&deletion.key))\n .collect::>>()?;\n\n if !ids.is_empty() {\n self.qdrant_client\n .delete_points(\n DeletePointsBuilder::new(&self.collection_name)\n .points(PointsIdsList { ids })\n .wait(true),\n )\n .await?;\n }\n\n Ok(())\n }\n}\nfn key_to_point_id(key_value: &KeyValue) -> Result {\n let point_id = match key_value {\n KeyValue::Str(v) => PointId::from(v.to_string()),\n KeyValue::Int64(v) => PointId::from(*v as u64),\n KeyValue::Uuid(v) => PointId::from(v.to_string()),\n e => bail!(\"Invalid Qdrant point ID: {e}\"),\n };\n\n Ok(point_id)\n}\n\nfn values_to_payload(\n value_fields: &[Value],\n fields_info: &[FieldInfo],\n) -> Result<(HashMap, NamedVectors)> {\n let mut payload = HashMap::with_capacity(value_fields.len());\n let mut vectors = NamedVectors::default();\n\n for (value, field_info) in value_fields.iter().zip(fields_info.iter()) {\n let field_name = &field_info.field_schema.name;\n\n match &field_info.vector_shape {\n Some(vector_shape) => {\n if value.is_null() {\n continue;\n }\n let vector: QdrantVector = match value {\n Value::Basic(basic_value) => match vector_shape {\n VectorShape::Vector(_) => encode_dense_vector(&basic_value)?.into(),\n VectorShape::MultiVector(_) => {\n encode_multi_dense_vector(&basic_value)?.into()\n }\n },\n _ => {\n bail!(\"Expected a vector field, got {:?}\", value);\n }\n };\n vectors = vectors.add_vector(field_name.clone(), vector);\n }\n None => {\n 
let json_value = serde_json::to_value(TypedValue {\n t: &field_info.field_schema.value_type.typ,\n v: value,\n })?;\n payload.insert(field_name.clone(), json_value.into());\n }\n }\n }\n\n Ok((payload, vectors))\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\n#[derive(Default)]\nstruct Factory {\n qdrant_clients: Mutex>, Arc>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionId {\n collection_name: String,\n}\n\nimpl Display for CollectionId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.collection_name)?;\n Ok(())\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = CollectionKey;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Qdrant\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(CollectionKey, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n if d.key_fields_schema.len() != 1 {\n api_bail!(\n \"Expected one primary key field for the point ID. 
Got {}.\",\n d.key_fields_schema.len()\n )\n }\n\n let mut fields_info = Vec::::new();\n let mut vector_def = BTreeMap::::new();\n let mut unsupported_vector_fields = Vec::<(String, ValueType)>::new();\n\n for field in d.value_fields_schema.iter() {\n let vector_shape = parse_vector_shape(&field.value_type.typ);\n if let Some(vector_shape) = &vector_shape {\n vector_def.insert(\n field.name.clone(),\n VectorDef {\n vector_size: vector_shape.vector_size(),\n metric: DEFAULT_VECTOR_SIMILARITY_METRIC,\n multi_vector_comparator: vector_shape.multi_vector_comparator().map(|s| s.as_str_name().to_string()),\n },\n );\n } else if matches!(\n &field.value_type.typ,\n schema::ValueType::Basic(schema::BasicValueType::Vector(_))\n ) {\n // This is a vector field but not supported by Qdrant\n unsupported_vector_fields.push((field.name.clone(), field.value_type.typ.clone()));\n }\n fields_info.push(FieldInfo {\n field_schema: field.clone(),\n vector_shape,\n });\n }\n\n let mut specified_vector_fields = HashSet::new();\n for vector_index in d.index_options.vector_indexes {\n match vector_def.get_mut(&vector_index.field_name) {\n Some(vector_def) => {\n if specified_vector_fields.insert(vector_index.field_name.clone()) {\n // Validate the metric is supported by Qdrant\n embedding_metric_to_qdrant(vector_index.metric)\n .with_context(||\n format!(\"Parsing vector index metric {} for field `{}`\", vector_index.metric, vector_index.field_name))?;\n vector_def.metric = vector_index.metric;\n } else {\n api_bail!(\"Field `{}` specified more than once in vector index definition\", vector_index.field_name);\n }\n }\n None => {\n if let Some(field) = d.value_fields_schema.iter().find(|f| f.name == vector_index.field_name) {\n api_bail!(\n \"Field `{}` specified in vector index is expected to be a number vector with fixed size, actual type: {}\",\n vector_index.field_name, field.value_type.typ\n );\n } else {\n api_bail!(\"Field `{}` specified in vector index is not found\", 
vector_index.field_name);\n }\n }\n }\n }\n\n let export_context = Arc::new(ExportContext {\n qdrant_client: self\n .get_qdrant_client(&d.spec.connection, &context.auth_registry)?,\n collection_name: d.spec.collection_name.clone(),\n fields_info,\n });\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: Box::pin(async move { Ok(export_context) }),\n setup_key: CollectionKey {\n connection: d.spec.connection,\n collection_name: d.spec.collection_name,\n },\n desired_setup_state: SetupState {\n vectors: vector_def,\n unsupported_vector_fields,\n },\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(match key {\n serde_json::Value::String(s) => {\n // For backward compatibility.\n CollectionKey {\n collection_name: s,\n connection: None,\n }\n }\n _ => serde_json::from_value(key)?,\n })\n }\n\n async fn check_setup_status(\n &self,\n _key: CollectionKey,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let desired_exists = desired.is_some();\n let add_collection = desired.filter(|state| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vectors != state.vectors)\n });\n let delete_collection = existing.possible_versions().next().is_some()\n && (!desired_exists || add_collection.is_some());\n Ok(SetupStatus {\n delete_collection,\n add_collection,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(if desired.vectors == existing.vectors {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n })\n }\n\n fn describe_resource(&self, key: &CollectionKey) -> Result {\n Ok(format!(\n \"Qdrant collection {}{}\",\n key.collection_name,\n key.connection\n .as_ref()\n .map_or_else(|| \"\".to_string(), |auth_entry| format!(\" @ {auth_entry}\"))\n ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: 
Vec>,\n ) -> Result<()> {\n for mutation_w_ctx in mutations.into_iter() {\n mutation_w_ctx\n .export_context\n .apply_mutation(mutation_w_ctx.mutation)\n .await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_delete(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_create(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n Ok(())\n }\n}\n\nimpl Factory {\n fn new() -> Self {\n Self {\n qdrant_clients: Mutex::new(HashMap::new()),\n }\n }\n\n fn get_qdrant_client(\n &self,\n auth_entry: &Option>,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let mut clients = self.qdrant_clients.lock().unwrap();\n if let Some(client) = clients.get(auth_entry) {\n return Ok(client.clone());\n }\n\n let spec = auth_entry.as_ref().map_or_else(\n || {\n Ok(ConnectionSpec {\n grpc_url: DEFAULT_URL.to_string(),\n api_key: None,\n })\n },\n |auth_entry| auth_registry.get(auth_entry),\n )?;\n let client = Arc::new(\n Qdrant::from_url(&spec.grpc_url)\n .api_key(spec.api_key)\n .skip_compatibility_check()\n .build()?,\n );\n clients.insert(auth_entry.clone(), client.clone());\n Ok(client)\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory::new().register(registry)\n}\n"], ["/cocoindex/src/ops/targets/kuzu.rs", "use chrono::TimeDelta;\nuse serde_json::json;\n\nuse std::fmt::Write;\n\nuse super::shared::property_graph::GraphElementMapping;\nuse super::shared::property_graph::*;\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, 
check_table_compatibility,\n};\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::prelude::*;\n\nuse crate::setup::SetupChangeType;\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n /// The URL of the [Kuzu API server](https://kuzu.com/docs/api/server/overview),\n /// e.g. `http://localhost:8000`.\n api_server_url: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\n////////////////////////////////////////////////////////////\n// Utils to deal with Kuzu\n////////////////////////////////////////////////////////////\n\nstruct CypherBuilder {\n query: String,\n}\n\nimpl CypherBuilder {\n fn new() -> Self {\n Self {\n query: String::new(),\n }\n }\n\n fn query_mut(&mut self) -> &mut String {\n &mut self.query\n }\n}\n\nstruct KuzuThinClient {\n reqwest_client: reqwest::Client,\n query_url: String,\n}\n\nimpl KuzuThinClient {\n fn new(conn_spec: &ConnectionSpec, reqwest_client: reqwest::Client) -> Self {\n Self {\n reqwest_client,\n query_url: format!(\"{}/cypher\", conn_spec.api_server_url.trim_end_matches('/')),\n }\n }\n\n async fn run_cypher(&self, cyper_builder: CypherBuilder) -> Result<()> {\n if cyper_builder.query.is_empty() {\n return Ok(());\n }\n let query = json!({\n \"query\": cyper_builder.query\n });\n let response = self\n .reqwest_client\n .post(&self.query_url)\n .json(&query)\n .send()\n .await?;\n if !response.status().is_success() {\n return Err(anyhow::anyhow!(\n \"Failed to run cypher: {}\",\n response.text().await?\n ));\n 
}\n Ok(())\n }\n}\n\nfn kuzu_table_type(elem_type: &ElementType) -> &'static str {\n match elem_type {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"REL\",\n }\n}\n\nfn basic_type_to_kuzu(basic_type: &BasicValueType) -> Result {\n Ok(match basic_type {\n BasicValueType::Bytes => \"BLOB\".to_string(),\n BasicValueType::Str => \"STRING\".to_string(),\n BasicValueType::Bool => \"BOOL\".to_string(),\n BasicValueType::Int64 => \"INT64\".to_string(),\n BasicValueType::Float32 => \"FLOAT\".to_string(),\n BasicValueType::Float64 => \"DOUBLE\".to_string(),\n BasicValueType::Range => \"UINT64[2]\".to_string(),\n BasicValueType::Uuid => \"UUID\".to_string(),\n BasicValueType::Date => \"DATE\".to_string(),\n BasicValueType::LocalDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::OffsetDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::TimeDelta => \"INTERVAL\".to_string(),\n BasicValueType::Vector(t) => format!(\n \"{}[{}]\",\n basic_type_to_kuzu(&t.element_type)?,\n t.dimension\n .map_or_else(|| \"\".to_string(), |d| d.to_string())\n ),\n t @ (BasicValueType::Union(_) | BasicValueType::Time | BasicValueType::Json) => {\n api_bail!(\"{t} is not supported in Kuzu\")\n }\n })\n}\n\nfn struct_schema_to_kuzu(struct_schema: &StructSchema) -> Result {\n Ok(format!(\n \"STRUCT({})\",\n struct_schema\n .fields\n .iter()\n .map(|f| Ok(format!(\n \"{} {}\",\n f.name,\n value_type_to_kuzu(&f.value_type.typ)?\n )))\n .collect::>>()?\n .join(\", \")\n ))\n}\n\nfn value_type_to_kuzu(value_type: &ValueType) -> Result {\n Ok(match value_type {\n ValueType::Basic(basic_type) => basic_type_to_kuzu(basic_type)?,\n ValueType::Struct(struct_type) => struct_schema_to_kuzu(struct_type)?,\n ValueType::Table(table_type) => format!(\"{}[]\", struct_schema_to_kuzu(&table_type.row)?),\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Serialize, 
Deserialize, Clone, PartialEq, Eq)]\nstruct ReferencedNodeTable {\n table_name: String,\n\n #[serde(with = \"indexmap::map::serde_seq\")]\n key_columns: IndexMap,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct SetupState {\n schema: TableColumnsSchema,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n referenced_node_tables: Option<(ReferencedNodeTable, ReferencedNodeTable)>,\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Borrowed(&val.schema)\n }\n}\n\n#[derive(Debug)]\nstruct GraphElementDataSetupStatus {\n actions: TableMainSetupAction,\n referenced_node_tables: Option<(String, String)>,\n drop_affected_referenced_node_tables: IndexSet,\n}\n\nimpl setup::ResourceSetupStatus for GraphElementDataSetupStatus {\n fn describe_changes(&self) -> Vec {\n self.actions.describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.actions.change_type(false)\n }\n}\n\nfn append_drop_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n if !setup_status.actions.drop_existing {\n return Ok(());\n }\n writeln!(\n cypher.query_mut(),\n \"DROP TABLE IF EXISTS {};\",\n elem_type.label()\n )?;\n Ok(())\n}\n\nfn append_delete_orphaned_nodes(cypher: &mut CypherBuilder, node_table: &str) -> Result<()> {\n writeln!(\n cypher.query_mut(),\n \"MATCH (n:{node_table}) WITH n WHERE NOT (n)--() DELETE n;\"\n )?;\n Ok(())\n}\n\nfn append_upsert_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n let table_upsertion = if let Some(table_upsertion) = &setup_status.actions.table_upsertion {\n table_upsertion\n } else {\n return Ok(());\n };\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n write!(\n cypher.query_mut(),\n \"CREATE {kuzu_table_type} TABLE IF NOT EXISTS {table_name} (\",\n 
kuzu_table_type = kuzu_table_type(elem_type),\n table_name = elem_type.label(),\n )?;\n if let Some((src, tgt)) = &setup_status.referenced_node_tables {\n write!(cypher.query_mut(), \"FROM {src} TO {tgt}, \")?;\n }\n cypher.query_mut().push_str(\n keys.iter()\n .chain(values.iter())\n .map(|(name, kuzu_type)| format!(\"{name} {kuzu_type}\"))\n .join(\", \")\n .as_str(),\n );\n match elem_type {\n ElementType::Node(_) => {\n write!(\n cypher.query_mut(),\n \", {SELF_CONTAINED_TAG_FIELD_NAME} BOOL, PRIMARY KEY ({})\",\n keys.iter().map(|(name, _)| name).join(\", \")\n )?;\n }\n ElementType::Relationship(_) => {}\n }\n write!(cypher.query_mut(), \");\\n\\n\")?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n let table_name = elem_type.label();\n for name in columns_to_delete\n .iter()\n .chain(columns_to_upsert.iter().map(|(name, _)| name))\n {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} DROP IF EXISTS {name};\"\n )?;\n }\n for (name, kuzu_type) in columns_to_upsert.iter() {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} ADD {name} {kuzu_type};\",\n )?;\n }\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Utils to convert value to Kuzu literals\n////////////////////////////////////////////////////////////\n\nfn append_string_literal(cypher: &mut CypherBuilder, s: &str) -> Result<()> {\n let out = cypher.query_mut();\n out.push('\"');\n for c in s.chars() {\n match c {\n '\\\\' => out.push_str(\"\\\\\\\\\"),\n '\"' => out.push_str(\"\\\\\\\"\"),\n // Control characters (0x00..=0x1F)\n c if (c as u32) < 0x20 => write!(out, \"\\\\u{:04X}\", c as u32)?,\n // BMP Unicode\n c if (c as u32) <= 0xFFFF => out.push(c),\n // Non-BMP Unicode: Encode as surrogate pairs for Cypher \\uXXXX\\uXXXX\n c => {\n let code = c as u32;\n let high = 0xD800 + ((code - 0x10000) >> 10);\n let low = 0xDC00 + ((code - 0x10000) & 0x3FF);\n write!(out, 
\"\\\\u{high:04X}\\\\u{low:04X}\")?;\n }\n }\n }\n out.push('\"');\n Ok(())\n}\n\nfn append_basic_value(cypher: &mut CypherBuilder, basic_value: &BasicValue) -> Result<()> {\n match basic_value {\n BasicValue::Bytes(bytes) => {\n write!(cypher.query_mut(), \"BLOB(\")?;\n for byte in bytes {\n write!(cypher.query_mut(), \"\\\\\\\\x{byte:02X}\")?;\n }\n write!(cypher.query_mut(), \")\")?;\n }\n BasicValue::Str(s) => {\n append_string_literal(cypher, s)?;\n }\n BasicValue::Bool(b) => {\n write!(cypher.query_mut(), \"{b}\")?;\n }\n BasicValue::Int64(i) => {\n write!(cypher.query_mut(), \"{i}\")?;\n }\n BasicValue::Float32(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Float64(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Range(r) => {\n write!(cypher.query_mut(), \"[{}, {}]\", r.start, r.end)?;\n }\n BasicValue::Uuid(u) => {\n write!(cypher.query_mut(), \"UUID(\\\"{u}\\\")\")?;\n }\n BasicValue::Date(d) => {\n write!(cypher.query_mut(), \"DATE(\\\"{d}\\\")\")?;\n }\n BasicValue::LocalDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::OffsetDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::TimeDelta(td) => {\n let num_days = td.num_days();\n let sub_day_duration = *td - TimeDelta::days(num_days);\n write!(cypher.query_mut(), \"INTERVAL(\\\"\")?;\n if num_days != 0 {\n write!(cypher.query_mut(), \"{num_days} days \")?;\n }\n let microseconds = sub_day_duration\n .num_microseconds()\n .ok_or_else(invariance_violation)?;\n write!(cypher.query_mut(), \"{microseconds} microseconds\\\")\")?;\n }\n BasicValue::Vector(v) => {\n write!(cypher.query_mut(), \"[\")?;\n let mut prefix = \"\";\n for elem in v.iter() {\n cypher.query_mut().push_str(prefix);\n append_basic_value(cypher, elem)?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"]\")?;\n }\n v @ (BasicValue::UnionVariant { .. 
} | BasicValue::Time(_) | BasicValue::Json(_)) => {\n bail!(\"value types are not supported in Kuzu: {}\", v.kind());\n }\n }\n Ok(())\n}\n\nfn append_struct_fields<'a>(\n cypher: &'a mut CypherBuilder,\n field_schema: &[schema::FieldSchema],\n field_values: impl Iterator,\n) -> Result<()> {\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(field_schema.iter(), field_values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v)?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_value(\n cypher: &mut CypherBuilder,\n typ: &schema::ValueType,\n value: &value::Value,\n) -> Result<()> {\n match value {\n value::Value::Null => {\n write!(cypher.query_mut(), \"NULL\")?;\n }\n value::Value::Basic(basic_value) => append_basic_value(cypher, basic_value)?,\n value::Value::Struct(struct_value) => {\n let struct_schema = match typ {\n schema::ValueType::Struct(struct_schema) => struct_schema,\n _ => {\n api_bail!(\"Expected struct type, got {}\", typ);\n }\n };\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &struct_schema.fields, struct_value.fields.iter())?;\n cypher.query_mut().push('}');\n }\n value::Value::KTable(map) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n cypher.query_mut().push('[');\n let mut prefix = \"\";\n for (k, v) in map.iter() {\n let key_value = value::Value::from(k);\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(\n cypher,\n &row_schema.fields,\n std::iter::once(&key_value).chain(v.fields.iter()),\n )?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n value::Value::LTable(rows) | value::Value::UTable(rows) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n 
cypher.query_mut().push('[');\n let mut prefix = \"\";\n for v in rows.iter() {\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &row_schema.fields, v.fields.iter())?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n conn_ref: AuthEntryReference,\n kuzu_client: KuzuThinClient,\n analyzed_data_coll: AnalyzedDataCollection,\n}\n\nfn append_key_pattern<'a>(\n cypher: &'a mut CypherBuilder,\n key_fields: &'a [FieldSchema],\n values: impl Iterator>,\n) -> Result<()> {\n write!(cypher.query_mut(), \"{{\")?;\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(key_fields.iter(), values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v.as_ref())?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"}}\")?;\n Ok(())\n}\n\nfn append_set_value_fields(\n cypher: &mut CypherBuilder,\n var_name: &str,\n value_fields: &[FieldSchema],\n value_fields_idx: &[usize],\n upsert_entry: &ExportTargetUpsertEntry,\n set_self_contained_tag: bool,\n) -> Result<()> {\n let mut prefix = \" SET \";\n if set_self_contained_tag {\n write!(\n cypher.query_mut(),\n \"{prefix}{var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE\"\n )?;\n prefix = \", \";\n }\n for (value_field, value_idx) in std::iter::zip(value_fields.iter(), value_fields_idx.iter()) {\n let field_name = &value_field.name;\n write!(cypher.query_mut(), \"{prefix}{var_name}.{field_name}=\")?;\n append_value(\n cypher,\n &value_field.value_type.typ,\n &upsert_entry.value.fields[*value_idx],\n )?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_upsert_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const 
NODE_VAR_NAME: &str = \"n\";\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({NODE_VAR_NAME}:{label} \",\n label = data_coll.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n NODE_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n true,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_merge_node_for_rel(\n cypher: &mut CypherBuilder,\n var_name: &str,\n field_mapping: &AnalyzedGraphElementFieldMapping,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({var_name}:{label} \",\n label = field_mapping.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &field_mapping.schema.key_fields,\n field_mapping\n .fields_input_idx\n .key\n .iter()\n .map(|idx| Cow::Borrowed(&upsert_entry.value.fields[*idx])),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n var_name,\n &field_mapping.schema.value_fields,\n &field_mapping.fields_input_idx.value,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut())?;\n Ok(())\n}\n\nfn append_upsert_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n const SRC_NODE_VAR_NAME: &str = \"s\";\n const TGT_NODE_VAR_NAME: &str = \"t\";\n\n let rel_info = if let Some(rel_info) = &data_coll.rel {\n rel_info\n } else {\n return Ok(());\n };\n append_merge_node_for_rel(cypher, SRC_NODE_VAR_NAME, &rel_info.source, upsert_entry)?;\n append_merge_node_for_rel(cypher, TGT_NODE_VAR_NAME, &rel_info.target, upsert_entry)?;\n {\n let rel_type = data_coll.schema.elem_type.label();\n write!(\n cypher.query_mut(),\n 
\"MERGE ({SRC_NODE_VAR_NAME})-[{REL_VAR_NAME}:{rel_type} \"\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \"]->({TGT_NODE_VAR_NAME})\")?;\n }\n append_set_value_fields(\n cypher,\n REL_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = data_coll.schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n key.fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} SET {NODE_VAR_NAME}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\"\n )?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n src_node_key: &KeyValue,\n tgt_node_key: &KeyValue,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n let rel_type = data_coll.schema.elem_type.label();\n\n write!(\n cypher.query_mut(),\n \"MATCH (:{label} \",\n label = rel.source.schema.elem_type.label()\n )?;\n let src_key_schema = &rel.source.schema.key_fields;\n append_key_pattern(\n cypher,\n src_key_schema,\n src_node_key\n .fields_iter(src_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n 
)?;\n\n write!(cypher.query_mut(), \")-[{REL_VAR_NAME}:{rel_type} \")?;\n let key_schema = &data_coll.schema.key_fields;\n append_key_pattern(\n cypher,\n key_schema,\n key.fields_iter(key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n\n write!(\n cypher.query_mut(),\n \"]->(:{label} \",\n label = rel.target.schema.elem_type.label()\n )?;\n let tgt_key_schema = &rel.target.schema.key_fields;\n append_key_pattern(\n cypher,\n tgt_key_schema,\n tgt_node_key\n .fields_iter(tgt_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n write!(cypher.query_mut(), \") DELETE {REL_VAR_NAME}\")?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_maybe_gc_node(\n cypher: &mut CypherBuilder,\n schema: &GraphElementSchema,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &schema.key_fields,\n key.fields_iter(schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n write!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\ntype KuzuGraphElement = GraphElementType;\n\nstruct Factory {\n reqwest_client: reqwest::Client,\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = GraphElementDataSetupStatus;\n\n type Key = KuzuGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Kuzu\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) 
-> Result<(\n Vec>,\n Vec<(KuzuGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n fn to_kuzu_cols(fields: &[FieldSchema]) -> Result> {\n fields\n .iter()\n .map(|f| Ok((f.name.clone(), value_type_to_kuzu(&f.value_type.typ)?)))\n .collect::>>()\n }\n let data_coll_outputs: Vec> =\n std::iter::zip(data_collections, analyzed_data_colls.into_iter())\n .map(|(data_coll, analyzed)| {\n fn to_dep_table(\n field_mapping: &AnalyzedGraphElementFieldMapping,\n ) -> Result {\n Ok(ReferencedNodeTable {\n table_name: field_mapping.schema.elem_type.label().to_string(),\n key_columns: to_kuzu_cols(&field_mapping.schema.key_fields)?,\n })\n }\n let setup_key = KuzuGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&analyzed.schema.key_fields)?,\n value_columns: to_kuzu_cols(&analyzed.schema.value_fields)?,\n },\n referenced_node_tables: (analyzed.rel.as_ref())\n .map(|rel| {\n anyhow::Ok((to_dep_table(&rel.source)?, to_dep_table(&rel.target)?))\n })\n .transpose()?,\n };\n\n let export_context = ExportContext {\n conn_ref: data_coll.spec.connection.clone(),\n kuzu_client: KuzuThinClient::new(\n &context\n .auth_registry\n .get::(&data_coll.spec.connection)?,\n self.reqwest_client.clone(),\n ),\n analyzed_data_coll: analyzed,\n };\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: async move { Ok(Arc::new(export_context)) }.boxed(),\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>()?;\n let decl_output = 
std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&graph_elem_schema.key_fields)?,\n value_columns: to_kuzu_cols(&graph_elem_schema.value_fields)?,\n },\n referenced_node_tables: None,\n };\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>()?;\n Ok((data_coll_outputs, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n _key: KuzuGraphElement,\n desired: Option,\n existing: CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let existing_invalidated = desired.as_ref().is_some_and(|desired| {\n existing\n .possible_versions()\n .any(|v| v.referenced_node_tables != desired.referenced_node_tables)\n });\n let actions =\n TableMainSetupAction::from_states(desired.as_ref(), &existing, existing_invalidated);\n let drop_affected_referenced_node_tables = if actions.drop_existing {\n existing\n .possible_versions()\n .flat_map(|v| &v.referenced_node_tables)\n .flat_map(|(src, tgt)| [src.table_name.clone(), tgt.table_name.clone()].into_iter())\n .collect()\n } else {\n IndexSet::new()\n };\n Ok(GraphElementDataSetupStatus {\n actions,\n referenced_node_tables: desired\n .and_then(|desired| desired.referenced_node_tables)\n .map(|(src, tgt)| (src.table_name, tgt.table_name)),\n drop_affected_referenced_node_tables,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(\n if desired.referenced_node_tables != existing.referenced_node_tables {\n SetupStateCompatibility::NotCompatible\n } else {\n check_table_compatibility(&desired.schema, &existing.schema)\n },\n )\n }\n\n fn describe_resource(&self, key: &KuzuGraphElement) -> Result {\n Ok(format!(\n \"Kuzu {} TABLE {}\",\n kuzu_table_type(&key.typ),\n key.typ.label()\n ))\n }\n\n fn 
extract_additional_key(\n &self,\n _key: &KeyValue,\n value: &FieldValues,\n export_context: &ExportContext,\n ) -> Result {\n let additional_key = if let Some(rel_info) = &export_context.analyzed_data_coll.rel {\n serde_json::to_value((\n (rel_info.source.fields_input_idx).extract_key(&value.fields)?,\n (rel_info.target.fields_input_idx).extract_key(&value.fields)?,\n ))?\n } else {\n serde_json::Value::Null\n };\n Ok(additional_key)\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mutations_by_conn = IndexMap::new();\n for mutation in mutations.into_iter() {\n mutations_by_conn\n .entry(mutation.export_context.conn_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mutations in mutations_by_conn.into_values() {\n let kuzu_client = &mutations[0].export_context.kuzu_client;\n let mut cypher = CypherBuilder::new();\n writeln!(cypher.query_mut(), \"BEGIN TRANSACTION;\")?;\n\n let (mut rel_mutations, nodes_mutations): (Vec<_>, Vec<_>) = mutations\n .into_iter()\n .partition(|m| m.export_context.analyzed_data_coll.rel.is_some());\n\n struct NodeTableGcInfo {\n schema: Arc,\n keys: IndexSet,\n }\n fn register_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: KeyValue,\n ) {\n map.entry(schema.elem_type.clone())\n .or_insert_with(|| NodeTableGcInfo {\n schema: schema.clone(),\n keys: IndexSet::new(),\n })\n .keys\n .insert(key);\n }\n fn resolve_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: &KeyValue,\n ) {\n map.get_mut(&schema.elem_type)\n .map(|info| info.keys.shift_remove(key));\n }\n let mut gc_info = IndexMap::::new();\n\n // Deletes for relationships\n for rel_mutation in rel_mutations.iter_mut() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n for delete in rel_mutation.mutation.deletes.iter_mut() {\n let mut additional_keys = match delete.additional_key.take() {\n serde_json::Value::Array(keys) => 
keys,\n _ => return Err(invariance_violation()),\n };\n if additional_keys.len() != 2 {\n api_bail!(\n \"Expected additional key with 2 fields, got {}\",\n delete.additional_key\n );\n }\n let src_key = KeyValue::from_json(\n additional_keys[0].take(),\n &rel.source.schema.key_fields,\n )?;\n let tgt_key = KeyValue::from_json(\n additional_keys[1].take(),\n &rel.target.schema.key_fields,\n )?;\n append_delete_rel(&mut cypher, data_coll, &delete.key, &src_key, &tgt_key)?;\n register_gc_node(&mut gc_info, &rel.source.schema, src_key);\n register_gc_node(&mut gc_info, &rel.target.schema, tgt_key);\n }\n }\n\n for node_mutation in nodes_mutations.iter() {\n let data_coll = &node_mutation.export_context.analyzed_data_coll;\n // Deletes for nodes\n for delete in node_mutation.mutation.deletes.iter() {\n append_delete_node(&mut cypher, data_coll, &delete.key)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &delete.key);\n }\n\n // Upserts for nodes\n for upsert in node_mutation.mutation.upserts.iter() {\n append_upsert_node(&mut cypher, data_coll, upsert)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &upsert.key);\n }\n }\n // Upserts for relationships\n for rel_mutation in rel_mutations.iter() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n for upsert in rel_mutation.mutation.upserts.iter() {\n append_upsert_rel(&mut cypher, data_coll, upsert)?;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n resolve_gc_node(\n &mut gc_info,\n &rel.source.schema,\n &(rel.source.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n resolve_gc_node(\n &mut gc_info,\n &rel.target.schema,\n &(rel.target.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n }\n }\n\n // GC orphaned nodes\n for info in gc_info.into_values() {\n for key in info.keys {\n append_maybe_gc_node(&mut cypher, &info.schema, &key)?;\n }\n }\n\n writeln!(cypher.query_mut(), \"COMMIT;\")?;\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n 
}\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n let mut changes_by_conn = IndexMap::new();\n for change in changes.into_iter() {\n changes_by_conn\n .entry(change.key.connection.clone())\n .or_insert_with(Vec::new)\n .push(change);\n }\n for (conn, changes) in changes_by_conn.into_iter() {\n let conn_spec = context.auth_registry.get::(&conn)?;\n let kuzu_client = KuzuThinClient::new(&conn_spec, self.reqwest_client.clone());\n\n let (node_changes, rel_changes): (Vec<_>, Vec<_>) =\n changes.into_iter().partition(|c| match &c.key.typ {\n ElementType::Node(_) => true,\n ElementType::Relationship(_) => false,\n });\n\n let mut partial_affected_node_tables = IndexSet::new();\n let mut cypher = CypherBuilder::new();\n // Relationships first when dropping.\n for change in rel_changes.iter().chain(node_changes.iter()) {\n if !change.setup_status.actions.drop_existing {\n continue;\n }\n append_drop_table(&mut cypher, change.setup_status, &change.key.typ)?;\n\n partial_affected_node_tables.extend(\n change\n .setup_status\n .drop_affected_referenced_node_tables\n .iter(),\n );\n if let ElementType::Node(label) = &change.key.typ {\n partial_affected_node_tables.swap_remove(label);\n }\n }\n // Nodes first when creating.\n for change in node_changes.iter().chain(rel_changes.iter()) {\n append_upsert_table(&mut cypher, change.setup_status, &change.key.typ)?;\n }\n\n for table in partial_affected_node_tables {\n append_delete_orphaned_nodes(&mut cypher, table)?;\n }\n\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n }\n}\n\npub fn register(\n registry: &mut ExecutorFactoryRegistry,\n reqwest_client: reqwest::Client,\n) -> Result<()> {\n Factory { reqwest_client }.register(registry)\n}\n"], ["/cocoindex/src/ops/factory_bases.rs", "use crate::prelude::*;\nuse crate::setup::ResourceSetupStatus;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse super::interface::*;\nuse super::registry::*;\nuse crate::api_bail;\nuse 
crate::api_error;\nuse crate::base::schema::*;\nuse crate::base::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\nuse crate::setup;\n// SourceFactoryBase\npub struct ResolvedOpArg {\n pub name: String,\n pub typ: EnrichedValueType,\n pub idx: usize,\n}\n\npub trait ResolvedOpArgExt: Sized {\n fn expect_type(self, expected_type: &ValueType) -> Result;\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value>;\n fn take_value(&self, args: &mut [value::Value]) -> Result;\n}\n\nimpl ResolvedOpArgExt for ResolvedOpArg {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n if &self.typ.typ != expected_type {\n api_bail!(\n \"Expected argument `{}` to be of type `{}`, got `{}`\",\n self.name,\n expected_type,\n self.typ.typ\n );\n }\n Ok(self)\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(&args[self.idx])\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(std::mem::take(&mut args[self.idx]))\n }\n}\n\nimpl ResolvedOpArgExt for Option {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n self.map(|arg| arg.expect_type(expected_type)).transpose()\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n Ok(self\n .as_ref()\n .map(|arg| arg.value(args))\n .transpose()?\n .unwrap_or(&value::Value::Null))\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n Ok(self\n .as_ref()\n .map(|arg| arg.take_value(args))\n .transpose()?\n .unwrap_or(value::Value::Null))\n }\n}\n\npub struct OpArgsResolver<'a> {\n args: &'a [OpArgSchema],\n num_positional_args: usize,\n next_positional_idx: usize,\n 
remaining_kwargs: HashMap<&'a str, usize>,\n}\n\nimpl<'a> OpArgsResolver<'a> {\n pub fn new(args: &'a [OpArgSchema]) -> Result {\n let mut num_positional_args = 0;\n let mut kwargs = HashMap::new();\n for (idx, arg) in args.iter().enumerate() {\n if let Some(name) = &arg.name.0 {\n kwargs.insert(name.as_str(), idx);\n } else {\n if !kwargs.is_empty() {\n api_bail!(\"Positional arguments must be provided before keyword arguments\");\n }\n num_positional_args += 1;\n }\n }\n Ok(Self {\n args,\n num_positional_args,\n next_positional_idx: 0,\n remaining_kwargs: kwargs,\n })\n }\n\n pub fn next_optional_arg(&mut self, name: &str) -> Result> {\n let idx = if let Some(idx) = self.remaining_kwargs.remove(name) {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\"`{name}` is provided as both positional and keyword arguments\");\n } else {\n Some(idx)\n }\n } else if self.next_positional_idx < self.num_positional_args {\n let idx = self.next_positional_idx;\n self.next_positional_idx += 1;\n Some(idx)\n } else {\n None\n };\n Ok(idx.map(|idx| ResolvedOpArg {\n name: name.to_string(),\n typ: self.args[idx].value_type.clone(),\n idx,\n }))\n }\n\n pub fn next_arg(&mut self, name: &str) -> Result {\n Ok(self\n .next_optional_arg(name)?\n .ok_or_else(|| api_error!(\"Required argument `{name}` is missing\",))?)\n }\n\n pub fn done(self) -> Result<()> {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\n \"Expected {} positional arguments, got {}\",\n self.next_positional_idx,\n self.num_positional_args\n );\n }\n if !self.remaining_kwargs.is_empty() {\n api_bail!(\n \"Unexpected keyword arguments: {}\",\n self.remaining_kwargs\n .keys()\n .map(|k| format!(\"`{k}`\"))\n .collect::>()\n .join(\", \")\n )\n }\n Ok(())\n }\n\n pub fn get_analyze_value(&self, resolved_arg: &ResolvedOpArg) -> &AnalyzedValueMapping {\n &self.args[resolved_arg.idx].analyzed_value\n }\n}\n\n#[async_trait]\npub trait SourceFactoryBase: SourceFactory + Send + 
Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn get_output_schema(\n &self,\n spec: &Self::Spec,\n context: &FlowInstanceContext,\n ) -> Result;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::Source(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SourceFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let output_schema = self.get_output_schema(&spec, &context).await?;\n let executor = self.build_executor(spec, context);\n Ok((output_schema, executor))\n }\n}\n\n// SimpleFunctionFactoryBase\n\n#[async_trait]\npub trait SimpleFunctionFactoryBase: SimpleFunctionFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type ResolvedArgs: Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Self::Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)>;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n resolved_input_schema: Self::ResolvedArgs,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::SimpleFunction(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let mut 
args_resolver = OpArgsResolver::new(&input_schema)?;\n let (resolved_input_schema, output_schema) = self\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n args_resolver.done()?;\n let executor = self.build_executor(spec, resolved_input_schema, context);\n Ok((output_schema, executor))\n }\n}\n\npub struct TypedExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: F::Key,\n pub desired_setup_state: F::SetupState,\n}\npub struct TypedExportDataCollectionSpec {\n pub name: String,\n pub spec: F::Spec,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\npub struct TypedResourceSetupChangeItem<'a, F: StorageFactoryBase + ?Sized> {\n pub key: F::Key,\n pub setup_status: &'a F::SetupStatus,\n}\n\n#[async_trait]\npub trait StorageFactoryBase: ExportTargetFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type DeclarationSpec: DeserializeOwned + Send + Sync;\n type Key: Debug + Clone + Serialize + DeserializeOwned + Eq + Hash + Send + Sync;\n type SetupState: Debug + Clone + Serialize + DeserializeOwned + Send + Sync;\n type SetupStatus: ResourceSetupStatus;\n type ExportContext: Send + Sync + 'static;\n\n fn name(&self) -> &str;\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Self::Key, Self::SetupState)>,\n )>;\n\n /// Deserialize the setup key from a JSON value.\n /// You can override this method to provide a custom deserialization logic, e.g. 
to perform backward compatible deserialization.\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(serde_json::from_value(key)?)\n }\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: Self::Key,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &Self::SetupState,\n existing_state: &Self::SetupState,\n ) -> Result;\n\n fn describe_resource(&self, key: &Self::Key) -> Result;\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &Self::ExportContext,\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::ExportTarget(Arc::new(self)),\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[async_trait]\nimpl ExportTargetFactory for T {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n let (data_coll_output, decl_output) = StorageFactoryBase::build(\n self,\n data_collections\n .into_iter()\n .map(|d| {\n anyhow::Ok(TypedExportDataCollectionSpec {\n name: d.name,\n spec: serde_json::from_value(d.spec)?,\n key_fields_schema: d.key_fields_schema,\n value_fields_schema: d.value_fields_schema,\n index_options: d.index_options,\n })\n })\n .collect::>>()?,\n declarations\n .into_iter()\n .map(|d| anyhow::Ok(serde_json::from_value(d)?))\n .collect::>>()?,\n context,\n )\n .await?;\n\n let data_coll_output = data_coll_output\n .into_iter()\n .map(|d| {\n 
Ok(interface::ExportDataCollectionBuildOutput {\n export_context: async move {\n Ok(d.export_context.await? as Arc)\n }\n .boxed(),\n setup_key: serde_json::to_value(d.setup_key)?,\n desired_setup_state: serde_json::to_value(d.desired_setup_state)?,\n })\n })\n .collect::>>()?;\n let decl_output = decl_output\n .into_iter()\n .map(|(key, state)| Ok((serde_json::to_value(key)?, serde_json::to_value(state)?)))\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result> {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n let desired_state: Option = desired_state\n .map(|v| serde_json::from_value(v.clone()))\n .transpose()?;\n let existing_states = from_json_combined_state(existing_states)?;\n let setup_status = StorageFactoryBase::check_setup_status(\n self,\n key,\n desired_state,\n existing_states,\n flow_instance_ctx,\n )\n .await?;\n Ok(Box::new(setup_status))\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n StorageFactoryBase::describe_resource(self, &key)\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n Ok(serde_json::to_value(key)?)\n }\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result {\n let result = StorageFactoryBase::check_state_compatibility(\n self,\n &serde_json::from_value(desired_state.clone())?,\n &serde_json::from_value(existing_state.clone())?,\n )?;\n Ok(result)\n }\n\n fn extract_additional_key(\n &self,\n key: &value::KeyValue,\n value: &value::FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n StorageFactoryBase::extract_additional_key(\n self,\n key,\n value,\n 
export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mutations = mutations\n .into_iter()\n .map(|m| {\n anyhow::Ok(ExportTargetMutationWithContext {\n mutation: m.mutation,\n export_context: m\n .export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>()?;\n StorageFactoryBase::apply_mutation(self, mutations).await\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n StorageFactoryBase::apply_setup_changes(\n self,\n setup_status\n .into_iter()\n .map(|item| -> anyhow::Result<_> {\n Ok(TypedResourceSetupChangeItem {\n key: serde_json::from_value(item.key.clone())?,\n setup_status: (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>>()?,\n context,\n )\n .await\n }\n}\nfn from_json_combined_state(\n existing_states: setup::CombinedState,\n) -> Result> {\n Ok(setup::CombinedState {\n current: existing_states\n .current\n .map(|v| serde_json::from_value(v))\n .transpose()?,\n staging: existing_states\n .staging\n .into_iter()\n .map(|v| {\n anyhow::Ok(match v {\n setup::StateChange::Upsert(v) => {\n setup::StateChange::Upsert(serde_json::from_value(v)?)\n }\n setup::StateChange::Delete => setup::StateChange::Delete,\n })\n })\n .collect::>()?,\n legacy_state_key: existing_states.legacy_state_key,\n })\n}\n"], ["/cocoindex/src/setup/db_metadata.rs", "use crate::prelude::*;\n\nuse super::{ResourceSetupInfo, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::utils::db::WriteAction;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\n\nconst SETUP_METADATA_TABLE_NAME: &str = \"cocoindex_setup_metadata\";\npub const FLOW_VERSION_RESOURCE_TYPE: &str = \"__FlowVersion\";\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SetupMetadataRecord {\n pub flow_name: String,\n // e.g. 
\"Flow\", \"SourceTracking\", \"Target:{TargetType}\"\n pub resource_type: String,\n pub key: serde_json::Value,\n pub state: Option,\n pub staging_changes: sqlx::types::Json>>,\n}\n\npub fn parse_flow_version(state: &Option) -> Option {\n match state {\n Some(serde_json::Value::Number(n)) => n.as_u64(),\n _ => None,\n }\n}\n\n/// Returns None if metadata table doesn't exist.\npub async fn read_setup_metadata(pool: &PgPool) -> Result>> {\n let mut db_conn = pool.acquire().await?;\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME}\",\n );\n let metadata = sqlx::query_as(&query_str).fetch_all(&mut *db_conn).await;\n let result = match metadata {\n Ok(metadata) => Some(metadata),\n Err(err) => {\n let exists: Option = sqlx::query_scalar(\n \"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = $1)\",\n )\n .bind(SETUP_METADATA_TABLE_NAME)\n .fetch_one(&mut *db_conn)\n .await?;\n if !exists.unwrap_or(false) {\n None\n } else {\n return Err(err.into());\n }\n }\n };\n Ok(result)\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ResourceTypeKey {\n pub resource_type: String,\n pub key: serde_json::Value,\n}\n\nimpl ResourceTypeKey {\n pub fn new(resource_type: String, key: serde_json::Value) -> Self {\n Self { resource_type, key }\n }\n}\n\nstatic VERSION_RESOURCE_TYPE_ID: LazyLock = LazyLock::new(|| ResourceTypeKey {\n resource_type: FLOW_VERSION_RESOURCE_TYPE.to_string(),\n key: serde_json::Value::Null,\n});\n\nasync fn read_metadata_records_for_flow(\n flow_name: &str,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1\",\n );\n let metadata: Vec = sqlx::query_as(&query_str)\n .bind(flow_name)\n .fetch_all(db_executor)\n .await?;\n let result = metadata\n .into_iter()\n .map(|m| 
{\n (\n ResourceTypeKey {\n resource_type: m.resource_type.clone(),\n key: m.key.clone(),\n },\n m,\n )\n })\n .collect();\n Ok(result)\n}\n\nasync fn read_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT state FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n let state: Option = sqlx::query_scalar(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .fetch_optional(db_executor)\n .await?;\n Ok(state)\n}\n\nasync fn upsert_staging_changes(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n staging_changes: Vec>,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, staging_changes) VALUES ($1, $2, $3, $4)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET staging_changes = $4 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(staging_changes))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn upsert_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n state: &serde_json::Value,\n action: WriteAction,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, state, staging_changes) VALUES ($1, $2, $3, $4, $5)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET state = $4, staging_changes = $5 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n 
sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(state))\n .bind(sqlx::types::Json(Vec::::new()))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn delete_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub struct StateUpdateInfo {\n pub desired_state: Option,\n pub legacy_key: Option,\n}\n\nimpl StateUpdateInfo {\n pub fn new(\n desired_state: Option<&impl Serialize>,\n legacy_key: Option,\n ) -> Result {\n Ok(Self {\n desired_state: desired_state\n .as_ref()\n .map(serde_json::to_value)\n .transpose()?,\n legacy_key,\n })\n }\n}\n\npub async fn stage_changes_for_flow(\n flow_name: &str,\n seen_metadata_version: Option,\n resource_update_info: &HashMap,\n pool: &PgPool,\n) -> Result {\n let mut txn = pool.begin().await?;\n let mut existing_records = read_metadata_records_for_flow(flow_name, &mut *txn).await?;\n let latest_metadata_version = existing_records\n .get(&VERSION_RESOURCE_TYPE_ID)\n .and_then(|m| parse_flow_version(&m.state));\n if seen_metadata_version < latest_metadata_version {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n let new_metadata_version = seen_metadata_version.unwrap_or_default() + 1;\n upsert_state(\n flow_name,\n &VERSION_RESOURCE_TYPE_ID,\n &serde_json::Value::Number(new_metadata_version.into()),\n if latest_metadata_version.is_some() {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n &mut *txn,\n )\n .await?;\n\n for (type_id, update_info) in resource_update_info {\n let existing = 
existing_records.remove(type_id);\n let change = match &update_info.desired_state {\n Some(desired_state) => StateChange::Upsert(desired_state.clone()),\n None => StateChange::Delete,\n };\n let mut new_staging_changes = vec![];\n if let Some(legacy_key) = &update_info.legacy_key {\n if let Some(legacy_record) = existing_records.remove(legacy_key) {\n new_staging_changes.extend(legacy_record.staging_changes.0);\n delete_state(flow_name, legacy_key, &mut *txn).await?;\n }\n }\n let (action, existing_staging_changes) = match existing {\n Some(existing) => {\n let existing_staging_changes = existing.staging_changes.0;\n if existing_staging_changes.iter().all(|c| c != &change) {\n new_staging_changes.push(change);\n }\n (WriteAction::Update, existing_staging_changes)\n }\n None => {\n if update_info.desired_state.is_some() {\n new_staging_changes.push(change);\n }\n (WriteAction::Insert, vec![])\n }\n };\n if !new_staging_changes.is_empty() {\n upsert_staging_changes(\n flow_name,\n type_id,\n [existing_staging_changes, new_staging_changes].concat(),\n &mut *txn,\n action,\n )\n .await?;\n }\n }\n txn.commit().await?;\n Ok(new_metadata_version)\n}\n\npub async fn commit_changes_for_flow(\n flow_name: &str,\n curr_metadata_version: u64,\n state_updates: &HashMap,\n delete_version: bool,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n let latest_metadata_version =\n parse_flow_version(&read_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?);\n if latest_metadata_version != Some(curr_metadata_version) {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n for (type_id, update_info) in state_updates.iter() {\n match &update_info.desired_state {\n Some(desired_state) => {\n upsert_state(\n flow_name,\n type_id,\n desired_state,\n WriteAction::Update,\n &mut *txn,\n )\n .await?;\n }\n None => {\n delete_state(flow_name, type_id, &mut *txn).await?;\n }\n }\n }\n if 
delete_version {\n delete_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?;\n }\n txn.commit().await?;\n Ok(())\n}\n\n#[derive(Debug)]\npub struct MetadataTableSetup {\n pub metadata_table_missing: bool,\n}\n\nimpl MetadataTableSetup {\n pub fn into_setup_info(self) -> ResourceSetupInfo<(), (), MetadataTableSetup> {\n ResourceSetupInfo {\n key: (),\n state: None,\n description: \"CocoIndex Metadata Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for MetadataTableSetup {\n fn describe_changes(&self) -> Vec {\n if self.metadata_table_missing {\n vec![setup::ChangeDescription::Action(format!(\n \"Create the cocoindex metadata table {SETUP_METADATA_TABLE_NAME}\"\n ))]\n } else {\n vec![]\n }\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.metadata_table_missing {\n SetupChangeType::Create\n } else {\n SetupChangeType::NoChange\n }\n }\n}\n\nimpl MetadataTableSetup {\n pub async fn apply_change(&self) -> Result<()> {\n if !self.metadata_table_missing {\n return Ok(());\n }\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n let query_str = format!(\n \"CREATE TABLE IF NOT EXISTS {SETUP_METADATA_TABLE_NAME} (\n flow_name TEXT NOT NULL,\n resource_type TEXT NOT NULL,\n key JSONB NOT NULL,\n state JSONB,\n staging_changes JSONB NOT NULL,\n\n PRIMARY KEY (flow_name, resource_type, key)\n )\n \",\n );\n sqlx::query(&query_str).execute(pool).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/py_factory.rs", "use crate::prelude::*;\n\nuse pyo3::{\n IntoPyObjectExt, Py, PyAny, Python, pyclass, pymethods,\n types::{IntoPyDict, PyList, PyString, PyTuple},\n};\nuse pythonize::{depythonize, pythonize};\n\nuse crate::{\n base::{schema, value},\n builder::plan,\n ops::sdk::SetupStateCompatibility,\n py::{self, ToResultWithPyTrace},\n};\nuse anyhow::{Result, anyhow};\n\n#[pyclass(name = \"OpArgSchema\")]\npub struct PyOpArgSchema {\n value_type: 
crate::py::Pythonized,\n analyzed_value: crate::py::Pythonized,\n}\n\n#[pymethods]\nimpl PyOpArgSchema {\n #[getter]\n fn value_type(&self) -> &crate::py::Pythonized {\n &self.value_type\n }\n\n #[getter]\n fn analyzed_value(&self) -> &crate::py::Pythonized {\n &self.analyzed_value\n }\n}\n\nstruct PyFunctionExecutor {\n py_function_executor: Py,\n py_exec_ctx: Arc,\n\n num_positional_args: usize,\n kw_args_names: Vec>,\n result_type: schema::EnrichedValueType,\n\n enable_cache: bool,\n behavior_version: Option,\n}\n\nimpl PyFunctionExecutor {\n fn call_py_fn<'py>(\n &self,\n py: Python<'py>,\n input: Vec,\n ) -> Result> {\n let mut args = Vec::with_capacity(self.num_positional_args);\n for v in input[0..self.num_positional_args].iter() {\n args.push(py::value_to_py_object(py, v)?);\n }\n\n let kwargs = if self.kw_args_names.is_empty() {\n None\n } else {\n let mut kwargs = Vec::with_capacity(self.kw_args_names.len());\n for (name, v) in self\n .kw_args_names\n .iter()\n .zip(input[self.num_positional_args..].iter())\n {\n kwargs.push((name.bind(py), py::value_to_py_object(py, v)?));\n }\n Some(kwargs)\n };\n\n let result = self\n .py_function_executor\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n kwargs\n .map(|kwargs| -> Result<_> { Ok(kwargs.into_py_dict(py)?) 
})\n .transpose()?\n .as_ref(),\n )\n .to_result_with_py_trace(py)?;\n Ok(result.into_bound(py))\n }\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionExecutor for Arc {\n async fn evaluate(&self, input: Vec) -> Result {\n let self = self.clone();\n let result_fut = Python::with_gil(|py| -> Result<_> {\n let result_coro = self.call_py_fn(py, input)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(self.py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro,\n )?)\n })?;\n let result = result_fut.await;\n Python::with_gil(|py| -> Result<_> {\n let result = result.to_result_with_py_trace(py)?;\n Ok(py::value_from_py_object(\n &self.result_type.typ,\n &result.into_bound(py),\n )?)\n })\n }\n\n fn enable_cache(&self) -> bool {\n self.enable_cache\n }\n\n fn behavior_version(&self) -> Option {\n self.behavior_version\n }\n}\n\npub(crate) struct PyFunctionFactory {\n pub py_function_factory: Py,\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionFactory for PyFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n schema::EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let (result_type, executor, kw_args_names, num_positional_args) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let mut args = vec![pythonize(py, &spec)?];\n let mut kwargs = vec![];\n let mut num_positional_args = 0;\n for arg in input_schema.into_iter() {\n let py_arg_schema = PyOpArgSchema {\n value_type: crate::py::Pythonized(arg.value_type.clone()),\n analyzed_value: crate::py::Pythonized(arg.analyzed_value.clone()),\n };\n match arg.name.0 {\n Some(name) => {\n kwargs.push((name.clone(), py_arg_schema));\n }\n None => {\n args.push(py_arg_schema.into_bound_py_any(py)?);\n num_positional_args += 1;\n }\n }\n }\n\n let kw_args_names = kwargs\n .iter()\n .map(|(name, _)| PyString::new(py, name).unbind())\n .collect::>();\n let result = 
self\n .py_function_factory\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n Some(&kwargs.into_py_dict(py)?),\n )\n .to_result_with_py_trace(py)?;\n let (result_type, executor) = result\n .extract::<(crate::py::Pythonized, Py)>(py)?;\n Ok((\n result_type.into_inner(),\n executor,\n kw_args_names,\n num_positional_args,\n ))\n })?;\n\n let executor_fut = {\n let result_type = result_type.clone();\n async move {\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let (prepare_fut, enable_cache, behavior_version) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let prepare_coro = executor\n .call_method(py, \"prepare\", (), None)\n .to_result_with_py_trace(py)?;\n let prepare_fut = pyo3_async_runtimes::into_future_with_locals(\n &pyo3_async_runtimes::TaskLocals::new(\n py_exec_ctx.event_loop.bind(py).clone(),\n ),\n prepare_coro.into_bound(py),\n )?;\n let enable_cache = executor\n .call_method(py, \"enable_cache\", (), None)\n .to_result_with_py_trace(py)?\n .extract::(py)?;\n let behavior_version = executor\n .call_method(py, \"behavior_version\", (), None)\n .to_result_with_py_trace(py)?\n .extract::>(py)?;\n Ok((prepare_fut, enable_cache, behavior_version))\n })?;\n prepare_fut.await?;\n Ok(Box::new(Arc::new(PyFunctionExecutor {\n py_function_executor: executor,\n py_exec_ctx,\n num_positional_args,\n kw_args_names,\n result_type,\n enable_cache,\n behavior_version,\n }))\n as Box)\n }\n };\n\n Ok((result_type, executor_fut.boxed()))\n }\n}\n\npub(crate) struct PyExportTargetFactory {\n pub py_target_connector: Py,\n}\n\nstruct PyTargetExecutorContext {\n py_export_ctx: Py,\n py_exec_ctx: Arc,\n}\n\n#[derive(Debug)]\nstruct PyTargetResourceSetupStatus {\n stale_existing_states: IndexSet>,\n desired_state: Option,\n}\n\nimpl setup::ResourceSetupStatus for PyTargetResourceSetupStatus {\n fn describe_changes(&self) -> Vec {\n vec![]\n }\n\n fn change_type(&self) -> 
setup::SetupChangeType {\n if self.stale_existing_states.is_empty() {\n setup::SetupChangeType::NoChange\n } else if self.desired_state.is_some() {\n if self\n .stale_existing_states\n .iter()\n .any(|state| state.is_none())\n {\n setup::SetupChangeType::Create\n } else {\n setup::SetupChangeType::Update\n }\n } else {\n setup::SetupChangeType::Delete\n }\n }\n}\n\n#[async_trait]\nimpl interface::ExportTargetFactory for PyExportTargetFactory {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n if declarations.len() != 0 {\n api_error!(\"Custom target connector doesn't support declarations yet\");\n }\n\n let mut build_outputs = Vec::with_capacity(data_collections.len());\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n for data_collection in data_collections.into_iter() {\n let (py_export_ctx, persistent_key) =\n Python::with_gil(|py| -> Result<(Py, serde_json::Value)> {\n // Deserialize the spec to Python object.\n let py_export_ctx = self\n .py_target_connector\n .call_method(\n py,\n \"create_export_context\",\n (\n &data_collection.name,\n pythonize(py, &data_collection.spec)?,\n pythonize(py, &data_collection.key_fields_schema)?,\n pythonize(py, &data_collection.value_fields_schema)?,\n ),\n None,\n )\n .to_result_with_py_trace(py)?;\n\n // Call the `get_persistent_key` method to get the persistent key.\n let persistent_key = self\n .py_target_connector\n .call_method(py, \"get_persistent_key\", (&py_export_ctx,), None)\n .to_result_with_py_trace(py)?;\n let persistent_key = depythonize(&persistent_key.into_bound(py))?;\n Ok((py_export_ctx, persistent_key))\n })?;\n\n let py_exec_ctx = py_exec_ctx.clone();\n let build_output = interface::ExportDataCollectionBuildOutput {\n export_context: Box::pin(async move {\n Ok(Arc::new(PyTargetExecutorContext {\n 
py_export_ctx,\n py_exec_ctx,\n }) as Arc)\n }),\n setup_key: persistent_key,\n desired_setup_state: data_collection.spec,\n };\n build_outputs.push(build_output);\n }\n Ok((build_outputs, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n _context: Arc,\n ) -> Result> {\n // Collect all possible existing states that are not the desired state.\n let mut stale_existing_states = IndexSet::new();\n if !existing_states.always_exists() && desired_state.is_some() {\n stale_existing_states.insert(None);\n }\n for possible_state in existing_states.possible_versions() {\n if Some(possible_state) != desired_state.as_ref() {\n stale_existing_states.insert(Some(possible_state.clone()));\n }\n }\n\n Ok(Box::new(PyTargetResourceSetupStatus {\n stale_existing_states,\n desired_state,\n }))\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n Ok(key.clone())\n }\n\n fn check_state_compatibility(\n &self,\n _desired_state: &serde_json::Value,\n _existing_state: &serde_json::Value,\n ) -> Result {\n // The Python target connector doesn't support state update yet.\n Ok(SetupStateCompatibility::Compatible)\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n Python::with_gil(|py| -> Result {\n let result = self\n .py_target_connector\n .call_method(py, \"describe_resource\", (pythonize(py, key)?,), None)\n .to_result_with_py_trace(py)?;\n let description = result.extract::(py)?;\n Ok(description)\n })\n }\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Filter the setup changes that are not NoChange, and flatten to\n // `list[tuple[key, list[stale_existing_states | None], desired_state | None]]` 
for Python.\n let mut setup_changes = Vec::new();\n for item in setup_status.into_iter() {\n let decoded_setup_status = (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n if ::change_type(decoded_setup_status)\n != setup::SetupChangeType::NoChange\n {\n setup_changes.push((\n item.key,\n &decoded_setup_status.stale_existing_states,\n &decoded_setup_status.desired_state,\n ));\n }\n }\n\n if setup_changes.is_empty() {\n return Ok(());\n }\n\n // Call the `apply_setup_changes_async()` method.\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let py_result = Python::with_gil(move |py| -> Result<_> {\n let result_coro = self\n .py_target_connector\n .call_method(\n py,\n \"apply_setup_changes_async\",\n (pythonize(py, &setup_changes)?,),\n None,\n )\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n\n Ok(())\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec<\n interface::ExportTargetMutationWithContext<'async_trait, dyn Any + Send + Sync>,\n >,\n ) -> Result<()> {\n if mutations.is_empty() {\n return Ok(());\n }\n\n let py_result = Python::with_gil(|py| -> Result<_> {\n // Create a `list[tuple[export_ctx, list[tuple[key, value | None]]]]` for Python, and collect `py_exec_ctx`.\n let mut py_args = Vec::with_capacity(mutations.len());\n let mut py_exec_ctx: Option<&Arc> = None;\n for mutation in mutations.into_iter() {\n // Downcast export_context to PyTargetExecutorContext.\n let export_context = (mutation.export_context as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n\n let mut flattened_mutations = Vec::with_capacity(\n 
mutation.mutation.upserts.len() + mutation.mutation.deletes.len(),\n );\n for upsert in mutation.mutation.upserts.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &upsert.key.into())?,\n py::field_values_to_py_object(py, upsert.value.fields.iter())?,\n ));\n }\n for delete in mutation.mutation.deletes.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &delete.key.into())?,\n py.None().into_bound(py),\n ));\n }\n py_args.push((\n &export_context.py_export_ctx,\n PyList::new(py, flattened_mutations)?.into_any(),\n ));\n py_exec_ctx = py_exec_ctx.or(Some(&export_context.py_exec_ctx));\n }\n let py_exec_ctx = py_exec_ctx.ok_or_else(invariance_violation)?;\n\n let result_coro = self\n .py_target_connector\n .call_method(py, \"mutate_async\", (py_args,), None)\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/property_graph.rs", "use crate::prelude::*;\n\nuse crate::ops::sdk::{AuthEntryReference, FieldSchema};\n\n#[derive(Debug, Deserialize)]\npub struct TargetFieldMapping {\n pub source: spec::FieldName,\n\n /// Field name for the node in the Knowledge Graph.\n /// If unspecified, it's the same as `field_name`.\n #[serde(default)]\n pub target: Option,\n}\n\nimpl TargetFieldMapping {\n pub fn get_target(&self) -> &spec::FieldName {\n self.target.as_ref().unwrap_or(&self.source)\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodeFromFieldsSpec {\n pub label: String,\n pub fields: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodesSpec {\n pub label: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct RelationshipsSpec {\n pub rel_type: String,\n pub source: NodeFromFieldsSpec,\n pub 
target: NodeFromFieldsSpec,\n}\n\n#[derive(Debug, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum GraphElementMapping {\n Relationship(RelationshipsSpec),\n Node(NodesSpec),\n}\n\n#[derive(Debug, Deserialize)]\npub struct GraphDeclaration {\n pub nodes_label: String,\n\n #[serde(flatten)]\n pub index_options: spec::IndexOptions,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)]\npub enum ElementType {\n Node(String),\n Relationship(String),\n}\n\nimpl ElementType {\n pub fn label(&self) -> &str {\n match self {\n ElementType::Node(label) => label,\n ElementType::Relationship(label) => label,\n }\n }\n\n pub fn from_mapping_spec(spec: &GraphElementMapping) -> Self {\n match spec {\n GraphElementMapping::Relationship(spec) => {\n ElementType::Relationship(spec.rel_type.clone())\n }\n GraphElementMapping::Node(spec) => ElementType::Node(spec.label.clone()),\n }\n }\n\n pub fn matcher(&self, var_name: &str) -> String {\n match self {\n ElementType::Relationship(label) => format!(\"()-[{var_name}:{label}]->()\"),\n ElementType::Node(label) => format!(\"({var_name}:{label})\"),\n }\n }\n}\n\nimpl std::fmt::Display for ElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ElementType::Node(label) => write!(f, \"Node(label:{label})\"),\n ElementType::Relationship(rel_type) => write!(f, \"Relationship(type:{rel_type})\"),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Derivative)]\n#[derivative(\n Clone(bound = \"\"),\n PartialEq(bound = \"\"),\n Eq(bound = \"\"),\n Hash(bound = \"\")\n)]\npub struct GraphElementType {\n #[serde(bound = \"\")]\n pub connection: AuthEntryReference,\n pub typ: ElementType,\n}\n\nimpl std::fmt::Display for GraphElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}/{}\", self.connection.key, self.typ)\n }\n}\n\npub struct GraphElementSchema {\n pub elem_type: ElementType,\n pub key_fields: Vec,\n pub value_fields: 
Vec,\n}\n\npub struct GraphElementInputFieldsIdx {\n pub key: Vec,\n pub value: Vec,\n}\n\nimpl GraphElementInputFieldsIdx {\n pub fn extract_key(&self, fields: &[value::Value]) -> Result {\n value::KeyValue::from_values(self.key.iter().map(|idx| &fields[*idx]))\n }\n}\n\npub struct AnalyzedGraphElementFieldMapping {\n pub schema: Arc,\n pub fields_input_idx: GraphElementInputFieldsIdx,\n}\n\nimpl AnalyzedGraphElementFieldMapping {\n pub fn has_value_fields(&self) -> bool {\n !self.fields_input_idx.value.is_empty()\n }\n}\n\npub struct AnalyzedRelationshipInfo {\n pub source: AnalyzedGraphElementFieldMapping,\n pub target: AnalyzedGraphElementFieldMapping,\n}\n\npub struct AnalyzedDataCollection {\n pub schema: Arc,\n pub value_fields_input_idx: Vec,\n\n pub rel: Option,\n}\n\nimpl AnalyzedDataCollection {\n pub fn dependent_node_labels(&self) -> IndexSet<&str> {\n let mut dependent_node_labels = IndexSet::new();\n if let Some(rel) = &self.rel {\n dependent_node_labels.insert(rel.source.schema.elem_type.label());\n dependent_node_labels.insert(rel.target.schema.elem_type.label());\n }\n dependent_node_labels\n }\n}\n\nstruct GraphElementSchemaBuilder {\n elem_type: ElementType,\n key_fields: Vec,\n value_fields: Vec,\n}\n\nimpl GraphElementSchemaBuilder {\n fn new(elem_type: ElementType) -> Self {\n Self {\n elem_type,\n key_fields: vec![],\n value_fields: vec![],\n }\n }\n\n fn merge_fields(\n elem_type: &ElementType,\n kind: &str,\n existing_fields: &mut Vec,\n fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result> {\n if fields.is_empty() {\n return Ok(vec![]);\n }\n let result: Vec = if existing_fields.is_empty() {\n let fields_idx: Vec = fields.iter().map(|(idx, _)| *idx).collect();\n existing_fields.extend(fields.into_iter().map(|(_, f)| f));\n fields_idx\n } else {\n if existing_fields.len() != fields.len() {\n bail!(\n \"{elem_type} {kind} fields number mismatch: {} vs {}\",\n existing_fields.len(),\n fields.len()\n );\n }\n let mut fields_map: 
HashMap<_, _> = fields\n .into_iter()\n .map(|(idx, schema)| (schema.name, (idx, schema.value_type)))\n .collect();\n // Follow the order of existing fields\n existing_fields\n .iter()\n .map(|existing_field| {\n let (idx, typ) = fields_map.remove(&existing_field.name).ok_or_else(|| {\n anyhow!(\n \"{elem_type} {kind} field `{}` not found in some collector\",\n existing_field.name\n )\n })?;\n if typ != existing_field.value_type {\n bail!(\n \"{elem_type} {kind} field `{}` type mismatch: {} vs {}\",\n existing_field.name,\n typ,\n existing_field.value_type\n )\n }\n Ok(idx)\n })\n .collect::>>()?\n };\n Ok(result)\n }\n\n fn merge(\n &mut self,\n key_fields: Vec<(usize, schema::FieldSchema)>,\n value_fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result {\n let key_fields_idx =\n Self::merge_fields(&self.elem_type, \"key\", &mut self.key_fields, key_fields)?;\n let value_fields_idx = Self::merge_fields(\n &self.elem_type,\n \"value\",\n &mut self.value_fields,\n value_fields,\n )?;\n Ok(GraphElementInputFieldsIdx {\n key: key_fields_idx,\n value: value_fields_idx,\n })\n }\n\n fn build_schema(self) -> Result {\n if self.key_fields.is_empty() {\n bail!(\n \"No key fields specified for Node label `{}`\",\n self.elem_type\n );\n }\n Ok(GraphElementSchema {\n elem_type: self.elem_type,\n key_fields: self.key_fields,\n value_fields: self.value_fields,\n })\n }\n}\nstruct DependentNodeLabelAnalyzer<'a, AuthEntry> {\n graph_elem_type: GraphElementType,\n fields: IndexMap,\n remaining_fields: HashMap<&'a str, &'a TargetFieldMapping>,\n primary_key_fields: &'a [String],\n}\n\nimpl<'a, AuthEntry> DependentNodeLabelAnalyzer<'a, AuthEntry> {\n fn new(\n conn: &'a spec::AuthEntryReference,\n rel_end_spec: &'a NodeFromFieldsSpec,\n primary_key_fields_map: &'a HashMap<&'a GraphElementType, &'a [String]>,\n ) -> Result {\n let graph_elem_type = GraphElementType {\n connection: conn.clone(),\n typ: ElementType::Node(rel_end_spec.label.clone()),\n };\n let primary_key_fields = 
primary_key_fields_map\n .get(&graph_elem_type)\n .ok_or_else(invariance_violation)?;\n Ok(Self {\n graph_elem_type,\n fields: IndexMap::new(),\n remaining_fields: rel_end_spec\n .fields\n .iter()\n .map(|f| (f.source.as_str(), f))\n .collect(),\n primary_key_fields,\n })\n }\n\n fn process_field(&mut self, field_idx: usize, field_schema: &schema::FieldSchema) -> bool {\n let field_mapping = match self.remaining_fields.remove(field_schema.name.as_str()) {\n Some(field_mapping) => field_mapping,\n None => return false,\n };\n self.fields.insert(\n field_mapping.get_target().clone(),\n (field_idx, field_schema.value_type.clone()),\n );\n true\n }\n\n fn build(\n self,\n schema_builders: &mut HashMap, GraphElementSchemaBuilder>,\n ) -> Result<(GraphElementType, GraphElementInputFieldsIdx)> {\n if !self.remaining_fields.is_empty() {\n anyhow::bail!(\n \"Fields not mapped for {}: {}\",\n self.graph_elem_type,\n self.remaining_fields.keys().join(\", \")\n );\n }\n\n let (mut key_fields, value_fields): (Vec<_>, Vec<_>) = self\n .fields\n .into_iter()\n .map(|(field_name, (idx, typ))| (idx, FieldSchema::new(field_name, typ)))\n .partition(|(_, f)| self.primary_key_fields.contains(&f.name));\n if key_fields.len() != self.primary_key_fields.len() {\n bail!(\n \"Primary key fields number mismatch: {} vs {}\",\n key_fields.iter().map(|(_, f)| &f.name).join(\", \"),\n self.primary_key_fields.iter().join(\", \")\n );\n }\n key_fields.sort_by_key(|(_, f)| {\n self.primary_key_fields\n .iter()\n .position(|k| k == &f.name)\n .unwrap()\n });\n\n let fields_idx = schema_builders\n .entry(self.graph_elem_type.clone())\n .or_insert_with(|| GraphElementSchemaBuilder::new(self.graph_elem_type.typ.clone()))\n .merge(key_fields, value_fields)?;\n Ok((self.graph_elem_type, fields_idx))\n }\n}\n\npub struct DataCollectionGraphMappingInput<'a, AuthEntry> {\n pub auth_ref: &'a spec::AuthEntryReference,\n pub mapping: &'a GraphElementMapping,\n pub index_options: &'a spec::IndexOptions,\n\n 
pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n}\n\npub fn analyze_graph_mappings<'a, AuthEntry: 'a>(\n data_coll_inputs: impl Iterator>,\n declarations: impl Iterator<\n Item = (\n &'a spec::AuthEntryReference,\n &'a GraphDeclaration,\n ),\n >,\n) -> Result<(Vec, Vec>)> {\n let data_coll_inputs: Vec<_> = data_coll_inputs.collect();\n let decls: Vec<_> = declarations.collect();\n\n // 1a. Prepare graph element types\n let graph_elem_types = data_coll_inputs\n .iter()\n .map(|d| GraphElementType {\n connection: d.auth_ref.clone(),\n typ: ElementType::from_mapping_spec(d.mapping),\n })\n .collect::>();\n let decl_graph_elem_types = decls\n .iter()\n .map(|(auth_ref, decl)| GraphElementType {\n connection: (*auth_ref).clone(),\n typ: ElementType::Node(decl.nodes_label.clone()),\n })\n .collect::>();\n\n // 1b. Prepare primary key fields map\n let primary_key_fields_map: HashMap<&GraphElementType, &[spec::FieldName]> =\n std::iter::zip(data_coll_inputs.iter(), graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| {\n (\n graph_elem_type,\n data_coll_input.index_options.primary_key_fields(),\n )\n })\n .chain(\n std::iter::zip(decl_graph_elem_types.iter(), decls.iter()).map(\n |(graph_elem_type, (_, decl))| {\n (graph_elem_type, decl.index_options.primary_key_fields())\n },\n ),\n )\n .map(|(graph_elem_type, primary_key_fields)| {\n Ok((\n graph_elem_type,\n primary_key_fields.with_context(|| {\n format!(\"Primary key fields are not set for {graph_elem_type}\")\n })?,\n ))\n })\n .collect::>()?;\n\n // 2. 
Analyze data collection graph mappings and build target schema\n let mut node_schema_builders =\n HashMap::, GraphElementSchemaBuilder>::new();\n struct RelationshipProcessedInfo {\n rel_schema: GraphElementSchema,\n source_typ: GraphElementType,\n source_fields_idx: GraphElementInputFieldsIdx,\n target_typ: GraphElementType,\n target_fields_idx: GraphElementInputFieldsIdx,\n }\n struct DataCollectionProcessedInfo {\n value_input_fields_idx: Vec,\n rel_specific: Option>,\n }\n let data_collection_processed_info = std::iter::zip(data_coll_inputs, graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| -> Result<_> {\n let processed_info = match data_coll_input.mapping {\n GraphElementMapping::Node(_) => {\n let input_fields_idx = node_schema_builders\n .entry(graph_elem_type.clone())\n .or_insert_with_key(|graph_elem| {\n GraphElementSchemaBuilder::new(graph_elem.typ.clone())\n })\n .merge(\n data_coll_input\n .key_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n data_coll_input\n .value_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n )?;\n\n if !(0..input_fields_idx.key.len()).eq(input_fields_idx.key.into_iter()) {\n return Err(invariance_violation());\n }\n DataCollectionProcessedInfo {\n value_input_fields_idx: input_fields_idx.value,\n rel_specific: None,\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let mut src_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.source,\n &primary_key_fields_map,\n )?;\n let mut tgt_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.target,\n &primary_key_fields_map,\n )?;\n\n let mut value_fields_schema = vec![];\n let mut value_input_fields_idx = vec![];\n for (field_idx, field_schema) in\n data_coll_input.value_fields_schema.into_iter().enumerate()\n {\n if !src_analyzer.process_field(field_idx, &field_schema)\n && !tgt_analyzer.process_field(field_idx, &field_schema)\n {\n 
value_fields_schema.push(field_schema.clone());\n value_input_fields_idx.push(field_idx);\n }\n }\n\n let rel_schema = GraphElementSchema {\n elem_type: graph_elem_type.typ.clone(),\n key_fields: data_coll_input.key_fields_schema,\n value_fields: value_fields_schema,\n };\n let (source_typ, source_fields_idx) =\n src_analyzer.build(&mut node_schema_builders)?;\n let (target_typ, target_fields_idx) =\n tgt_analyzer.build(&mut node_schema_builders)?;\n DataCollectionProcessedInfo {\n value_input_fields_idx,\n rel_specific: Some(RelationshipProcessedInfo {\n rel_schema,\n source_typ,\n source_fields_idx,\n target_typ,\n target_fields_idx,\n }),\n }\n }\n };\n Ok(processed_info)\n })\n .collect::>>()?;\n\n let node_schemas: HashMap, Arc> =\n node_schema_builders\n .into_iter()\n .map(|(graph_elem_type, schema_builder)| {\n Ok((graph_elem_type, Arc::new(schema_builder.build_schema()?)))\n })\n .collect::>()?;\n\n // 3. Build output\n let analyzed_data_colls: Vec =\n std::iter::zip(data_collection_processed_info, graph_elem_types.iter())\n .map(|(processed_info, graph_elem_type)| {\n let result = match processed_info.rel_specific {\n // Node\n None => AnalyzedDataCollection {\n schema: node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone(),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: None,\n },\n // Relationship\n Some(rel_info) => AnalyzedDataCollection {\n schema: Arc::new(rel_info.rel_schema),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: Some(AnalyzedRelationshipInfo {\n source: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.source_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.source_fields_idx,\n },\n target: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.target_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.target_fields_idx,\n },\n }),\n },\n };\n 
Ok(result)\n })\n .collect::>()?;\n let decl_schemas: Vec> = decl_graph_elem_types\n .iter()\n .map(|graph_elem_type| {\n Ok(node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone())\n })\n .collect::>()?;\n Ok((analyzed_data_colls, decl_schemas))\n}\n"], ["/cocoindex/src/ops/interface.rs", "use std::time::SystemTime;\n\nuse crate::base::{schema::*, spec::IndexOptions, value::*};\nuse crate::prelude::*;\nuse crate::setup;\nuse chrono::TimeZone;\nuse serde::Serialize;\n\npub struct FlowInstanceContext {\n pub flow_instance_name: String,\n pub auth_registry: Arc,\n pub py_exec_ctx: Option>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Default)]\npub struct Ordinal(pub Option);\n\nimpl Ordinal {\n pub fn unavailable() -> Self {\n Self(None)\n }\n\n pub fn is_available(&self) -> bool {\n self.0.is_some()\n }\n}\n\nimpl From for Option {\n fn from(val: Ordinal) -> Self {\n val.0\n }\n}\n\nimpl TryFrom for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: SystemTime) -> Result {\n let duration = time.duration_since(std::time::UNIX_EPOCH)?;\n Ok(Ordinal(Some(duration.as_micros().try_into()?)))\n }\n}\n\nimpl TryFrom> for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: chrono::DateTime) -> Result {\n Ok(Ordinal(Some(time.timestamp_micros())))\n }\n}\n\npub struct PartialSourceRowMetadata {\n pub key: KeyValue,\n pub ordinal: Option,\n}\n\n#[derive(Debug)]\npub enum SourceValue {\n Existence(FieldValues),\n NonExistence,\n}\n\nimpl SourceValue {\n pub fn is_existent(&self) -> bool {\n matches!(self, Self::Existence(_))\n }\n\n pub fn as_optional(&self) -> Option<&FieldValues> {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n\n pub fn into_optional(self) -> Option {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n}\n\npub struct SourceData {\n pub value: SourceValue,\n pub ordinal: Ordinal,\n}\n\npub struct 
SourceChange {\n pub key: KeyValue,\n\n /// If None, the engine will poll to get the latest existence state and value.\n pub data: Option,\n}\n\npub struct SourceChangeMessage {\n pub changes: Vec,\n pub ack_fn: Option BoxFuture<'static, Result<()>> + Send + Sync>>,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorListOptions {\n pub include_ordinal: bool,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorGetOptions {\n pub include_ordinal: bool,\n pub include_value: bool,\n}\n\n#[derive(Debug)]\npub struct PartialSourceRowData {\n pub value: Option,\n pub ordinal: Option,\n}\n\nimpl TryFrom for SourceData {\n type Error = anyhow::Error;\n\n fn try_from(data: PartialSourceRowData) -> Result {\n Ok(Self {\n value: data\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value is missing\"))?,\n ordinal: data\n .ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is missing\"))?,\n })\n }\n}\n#[async_trait]\npub trait SourceExecutor: Send + Sync {\n /// Get the list of keys for the source.\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>>;\n\n // Get the value for the given key.\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result;\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n Ok(None)\n }\n}\n\n#[async_trait]\npub trait SourceFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[async_trait]\npub trait SimpleFunctionExecutor: Send + Sync {\n /// Evaluate the operation.\n async fn evaluate(&self, args: Vec) -> Result;\n\n fn enable_cache(&self) -> bool {\n false\n }\n\n /// Must be Some if `enable_cache` is true.\n /// If it changes, the cache will be invalidated.\n fn behavior_version(&self) -> Option {\n None\n }\n}\n\n#[async_trait]\npub trait SimpleFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: 
Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[derive(Debug)]\npub struct ExportTargetUpsertEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n pub value: FieldValues,\n}\n\n#[derive(Debug)]\npub struct ExportTargetDeleteEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Debug, Default)]\npub struct ExportTargetMutation {\n pub upserts: Vec,\n pub deletes: Vec,\n}\n\nimpl ExportTargetMutation {\n pub fn is_empty(&self) -> bool {\n self.upserts.is_empty() && self.deletes.is_empty()\n }\n}\n\n#[derive(Debug)]\npub struct ExportTargetMutationWithContext<'ctx, T: ?Sized + Send + Sync> {\n pub mutation: ExportTargetMutation,\n pub export_context: &'ctx T,\n}\n\npub struct ResourceSetupChangeItem<'a> {\n pub key: &'a serde_json::Value,\n pub setup_status: &'a dyn setup::ResourceSetupStatus,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum SetupStateCompatibility {\n /// The resource is fully compatible with the desired state.\n /// This means the resource can be updated to the desired state without any loss of data.\n Compatible,\n /// The resource is partially compatible with the desired state.\n /// This means data from some existing fields will be lost after applying the setup change.\n /// But at least their key fields of all rows are still preserved.\n PartialCompatible,\n /// The resource needs to be rebuilt. 
After applying the setup change, all data will be gone.\n NotCompatible,\n}\n\npub struct ExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n}\n\npub struct ExportDataCollectionSpec {\n pub name: String,\n pub spec: serde_json::Value,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\n#[async_trait]\npub trait ExportTargetFactory: Send + Sync {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )>;\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n context: Arc,\n ) -> Result>;\n\n /// Normalize the key. e.g. the JSON format may change (after code change, e.g. 
new optional field or field ordering), even if the underlying value is not changed.\n /// This should always return the canonical serialized form.\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result;\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result;\n\n fn extract_additional_key(\n &self,\n key: &KeyValue,\n value: &FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result;\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[derive(Clone)]\npub enum ExecutorFactory {\n Source(Arc),\n SimpleFunction(Arc),\n ExportTarget(Arc),\n}\n"], ["/cocoindex/src/execution/indexing_status.rs", "use crate::prelude::*;\n\nuse super::db_tracking;\nuse super::evaluator;\nuse futures::try_join;\n\n#[derive(Debug, Serialize)]\npub struct SourceRowLastProcessedInfo {\n pub source_ordinal: interface::Ordinal,\n pub processing_time: Option>,\n pub is_logic_current: bool,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowInfo {\n pub ordinal: interface::Ordinal,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowIndexingStatus {\n pub last_processed: Option,\n pub current: Option,\n}\n\npub async fn get_source_row_indexing_status(\n src_eval_ctx: &evaluator::SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n pool: &sqlx::PgPool,\n) -> Result {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let last_processed_fut = db_tracking::read_source_last_processed_info(\n setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n let current_fut = src_eval_ctx.import_op.executor.get_value(\n src_eval_ctx.key,\n 
&interface::SourceExecutorGetOptions {\n include_value: false,\n include_ordinal: true,\n },\n );\n let (last_processed, current) = try_join!(last_processed_fut, current_fut)?;\n\n let last_processed = last_processed.map(|l| SourceRowLastProcessedInfo {\n source_ordinal: interface::Ordinal(l.processed_source_ordinal),\n processing_time: l\n .process_time_micros\n .and_then(chrono::DateTime::::from_timestamp_micros),\n is_logic_current: Some(src_eval_ctx.plan.logic_fingerprint.0.as_slice())\n == l.process_logic_fingerprint.as_deref(),\n });\n let current = SourceRowInfo {\n ordinal: current\n .ordinal\n .ok_or(anyhow::anyhow!(\"Ordinal is unavailable for the source\"))?,\n };\n Ok(SourceRowIndexingStatus {\n last_processed,\n current: Some(current),\n })\n}\n"], ["/cocoindex/src/ops/sources/amazon_s3.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse aws_config::BehaviorVersion;\nuse aws_sdk_s3::Client;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\nuse urlencoding;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n/// Decode a form-encoded URL string, treating '+' as spaces\nfn decode_form_encoded_url(input: &str) -> Result> {\n // Replace '+' with spaces (form encoding convention), then decode\n // This handles both cases correctly:\n // - Literal '+' would be encoded as '%2B' and remain unchanged after replacement\n // - Space would be encoded as '+' and become ' ' after replacement\n let with_spaces = input.replace(\"+\", \" \");\n Ok(urlencoding::decode(&with_spaces)?.into())\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n sqs_queue_url: Option,\n}\n\nstruct SqsContext {\n client: aws_sdk_sqs::Client,\n queue_url: String,\n}\n\nimpl SqsContext {\n async fn delete_message(&self, receipt_handle: String) -> Result<()> {\n self.client\n .delete_message()\n .queue_url(&self.queue_url)\n 
.receipt_handle(receipt_handle)\n .send()\n .await?;\n Ok(())\n }\n}\n\nstruct Executor {\n client: Client,\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n sqs_context: Option>,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &aws_sdk_s3::primitives::DateTime) -> Ordinal {\n Ordinal(Some((dt.as_nanos() / 1000) as i64))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token = None;\n loop {\n let mut req = self.client\n .list_objects_v2()\n .bucket(&self.bucket_name);\n if let Some(ref p) = self.prefix {\n req = req.prefix(p);\n }\n if let Some(ref token) = continuation_token {\n req = req.continuation_token(token);\n }\n let resp = req.send().await?;\n if let Some(contents) = &resp.contents {\n let mut batch = Vec::new();\n for obj in contents {\n if let Some(key) = obj.key() {\n // Only include files (not folders)\n if key.ends_with('/') { continue; }\n let include = self.included_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(true);\n let exclude = self.excluded_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(false);\n if include && !exclude {\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.to_string().into()),\n ordinal: obj.last_modified().map(datetime_to_ordinal),\n });\n }\n }\n }\n if !batch.is_empty() {\n yield batch;\n }\n }\n if resp.is_truncated == Some(true) {\n continuation_token = resp.next_continuation_token.clone().map(|s| s.to_string());\n } else {\n break;\n }\n }\n }.boxed()\n 
}\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let resp = self\n .client\n .get_object()\n .bucket(&self.bucket_name)\n .key(key_str.as_ref())\n .send()\n .await;\n let obj = match resp {\n Err(e) if e.as_service_error().is_some_and(|e| e.is_no_such_key()) => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n r => r?,\n };\n let ordinal = if options.include_ordinal {\n obj.last_modified().map(datetime_to_ordinal)\n } else {\n None\n };\n let value = if options.include_value {\n let bytes = obj.body.collect().await?.into_bytes();\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes.to_vec())\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let sqs_context = if let Some(sqs_context) = &self.sqs_context {\n sqs_context\n } else {\n return Ok(None);\n };\n let stream = stream! 
{\n loop {\n match self.poll_sqs(sqs_context).await {\n Ok(messages) => {\n for message in messages {\n yield Ok(message);\n }\n }\n Err(e) => {\n yield Err(e);\n }\n };\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventNotification {\n #[serde(default, rename = \"Records\")]\n pub records: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventRecord {\n #[serde(rename = \"eventName\")]\n pub event_name: String,\n pub s3: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Entity {\n pub bucket: S3Bucket,\n pub object: S3Object,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Bucket {\n pub name: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Object {\n pub key: String,\n}\n\nimpl Executor {\n async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n .is_none_or(|prefix| s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n 
change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AmazonS3\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n let config = aws_config::load_defaults(BehaviorVersion::latest()).await;\n Ok(Box::new(Executor {\n client: Client::new(&config),\n bucket_name: spec.bucket_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n sqs_context: spec.sqs_queue_url.map(|url| {\n Arc::new(SqsContext {\n client: aws_sdk_sqs::Client::new(&config),\n queue_url: url,\n })\n }),\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], 
["/cocoindex/src/builder/analyzed_flow.rs", "use crate::{ops::interface::FlowInstanceContext, prelude::*};\n\nuse super::{analyzer, plan};\nuse crate::service::error::{SharedError, SharedResultExt, shared_ok};\n\npub struct AnalyzedFlow {\n pub flow_instance: spec::FlowInstanceSpec,\n pub data_schema: schema::FlowSchema,\n pub setup_state: exec_ctx::AnalyzedSetupState,\n\n pub flow_instance_ctx: Arc,\n\n /// It's None if the flow is not up to date\n pub execution_plan: Shared, SharedError>>>,\n}\n\nimpl AnalyzedFlow {\n pub async fn from_flow_instance(\n flow_instance: crate::base::spec::FlowInstanceSpec,\n flow_instance_ctx: Arc,\n ) -> Result {\n let (data_schema, setup_state, execution_plan_fut) =\n analyzer::analyze_flow(&flow_instance, flow_instance_ctx.clone()).await?;\n let execution_plan = async move {\n shared_ok(Arc::new(\n execution_plan_fut.await.map_err(SharedError::new)?,\n ))\n }\n .boxed()\n .shared();\n let result = Self {\n flow_instance,\n data_schema,\n setup_state,\n flow_instance_ctx,\n execution_plan,\n };\n Ok(result)\n }\n\n pub async fn get_execution_plan(&self) -> Result> {\n let execution_plan = self.execution_plan.clone().await.std_result()?;\n Ok(execution_plan)\n }\n}\n\npub struct AnalyzedTransientFlow {\n pub transient_flow_instance: spec::TransientFlowSpec,\n pub data_schema: schema::FlowSchema,\n pub execution_plan: plan::TransientExecutionPlan,\n pub output_type: schema::EnrichedValueType,\n}\n\nimpl AnalyzedTransientFlow {\n pub async fn from_transient_flow(\n transient_flow: spec::TransientFlowSpec,\n py_exec_ctx: Option,\n ) -> Result {\n let ctx = analyzer::build_flow_instance_context(&transient_flow.name, py_exec_ctx);\n let (output_type, data_schema, execution_plan_fut) =\n analyzer::analyze_transient_flow(&transient_flow, ctx).await?;\n Ok(Self {\n transient_flow_instance: transient_flow,\n data_schema,\n execution_plan: execution_plan_fut.await?,\n output_type,\n })\n }\n}\n"], ["/cocoindex/src/base/spec.rs", "use 
crate::prelude::*;\n\nuse super::schema::{EnrichedValueType, FieldSchema};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\nuse std::ops::Deref;\n\n/// OutputMode enum for displaying spec info in different granularity\n#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum OutputMode {\n Concise,\n Verbose,\n}\n\n/// Formatting spec per output mode\npub trait SpecFormatter {\n fn format(&self, mode: OutputMode) -> String;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum SpecString {\n /// The value comes from the environment variable.\n Env(String),\n /// The value is defined by the literal string.\n #[serde(untagged)]\n Literal(String),\n}\n\npub type ScopeName = String;\n\n/// Used to identify a data field within a flow.\n/// Within a flow, in each specific scope, each field name must be unique.\n/// - A field is defined by `outputs` of an operation. There must be exactly one definition for each field.\n/// - A field can be used as an input for multiple operations.\npub type FieldName = String;\n\npub const ROOT_SCOPE_NAME: &str = \"_root\";\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]\npub struct FieldPath(pub Vec);\n\nimpl Deref for FieldPath {\n type Target = Vec;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl fmt::Display for FieldPath {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.is_empty() {\n write!(f, \"*\")\n } else {\n write!(f, \"{}\", self.join(\".\"))\n }\n }\n}\n\n/// Used to identify an input or output argument for an operator.\n/// Useful to identify different inputs/outputs of the same operation. 
Usually omitted for operations with the same purpose of input/output.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct OpArgName(pub Option);\n\nimpl fmt::Display for OpArgName {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if let Some(arg_name) = &self.0 {\n write!(f, \"${arg_name}\")\n } else {\n write!(f, \"?\")\n }\n }\n}\n\nimpl OpArgName {\n pub fn is_unnamed(&self) -> bool {\n self.0.is_none()\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct NamedSpec {\n pub name: String,\n\n #[serde(flatten)]\n pub spec: T,\n}\n\nimpl fmt::Display for NamedSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}: {}\", self.name, self.spec)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FieldMapping {\n /// If unspecified, means the current scope.\n /// \"_root\" refers to the top-level scope.\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub scope: Option,\n\n pub field_path: FieldPath,\n}\n\nimpl fmt::Display for FieldMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let scope = self.scope.as_deref().unwrap_or(\"\");\n write!(\n f,\n \"{}{}\",\n if scope.is_empty() {\n \"\".to_string()\n } else {\n format!(\"{scope}.\")\n },\n self.field_path\n )\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ConstantMapping {\n pub schema: EnrichedValueType,\n pub value: serde_json::Value,\n}\n\nimpl fmt::Display for ConstantMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let value = serde_json::to_string(&self.value).unwrap_or(\"#serde_error\".to_string());\n write!(f, \"{value}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectionMapping {\n pub field: FieldMapping,\n pub scope_name: ScopeName,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StructMapping {\n pub fields: Vec>,\n}\n\nimpl fmt::Display for 
StructMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let fields = self\n .fields\n .iter()\n .map(|field| field.name.clone())\n .collect::>()\n .join(\",\");\n write!(f, \"{fields}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum ValueMapping {\n Constant(ConstantMapping),\n Field(FieldMapping),\n Struct(StructMapping),\n // TODO: Add support for collections\n}\n\nimpl ValueMapping {\n pub fn is_entire_scope(&self) -> bool {\n match self {\n ValueMapping::Field(FieldMapping {\n scope: None,\n field_path,\n }) => field_path.is_empty(),\n _ => false,\n }\n }\n}\n\nimpl std::fmt::Display for ValueMapping {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n match self {\n ValueMapping::Constant(v) => write!(\n f,\n \"{}\",\n serde_json::to_string(&v.value)\n .unwrap_or_else(|_| \"#(invalid json value)\".to_string())\n ),\n ValueMapping::Field(v) => {\n write!(f, \"{}.{}\", v.scope.as_deref().unwrap_or(\"\"), v.field_path)\n }\n ValueMapping::Struct(v) => write!(\n f,\n \"Struct({})\",\n v.fields\n .iter()\n .map(|f| format!(\"{}={}\", f.name, f.spec))\n .collect::>()\n .join(\", \")\n ),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpArgBinding {\n #[serde(default, skip_serializing_if = \"OpArgName::is_unnamed\")]\n pub arg_name: OpArgName,\n\n #[serde(flatten)]\n pub value: ValueMapping,\n}\n\nimpl fmt::Display for OpArgBinding {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.arg_name.is_unnamed() {\n write!(f, \"{}\", self.value)\n } else {\n write!(f, \"{}={}\", self.arg_name, self.value)\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpSpec {\n pub kind: String,\n #[serde(flatten, default)]\n pub spec: serde_json::Map,\n}\n\nimpl SpecFormatter for OpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.kind.clone(),\n OutputMode::Verbose => {\n let 
spec_str = serde_json::to_string_pretty(&self.spec)\n .map(|s| {\n let lines: Vec<&str> = s.lines().collect();\n if lines.len() < s.lines().count() {\n lines\n .into_iter()\n .chain([\"...\"])\n .collect::>()\n .join(\"\\n \")\n } else {\n lines.join(\"\\n \")\n }\n })\n .unwrap_or(\"#serde_error\".to_string());\n format!(\"{}({})\", self.kind, spec_str)\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ExecutionOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_rows: Option,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_bytes: Option,\n}\n\nimpl ExecutionOptions {\n pub fn get_concur_control_options(&self) -> concur_control::Options {\n concur_control::Options {\n max_inflight_rows: self.max_inflight_rows,\n max_inflight_bytes: self.max_inflight_bytes,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct SourceRefreshOptions {\n pub refresh_interval: Option,\n}\n\nimpl fmt::Display for SourceRefreshOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let refresh = self\n .refresh_interval\n .map(|d| format!(\"{d:?}\"))\n .unwrap_or(\"none\".to_string());\n write!(f, \"{refresh}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ImportOpSpec {\n pub source: OpSpec,\n\n #[serde(default)]\n pub refresh_options: SourceRefreshOptions,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl SpecFormatter for ImportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let source = self.source.format(mode);\n format!(\"source={}, refresh={}\", source, self.refresh_options)\n }\n}\n\nimpl fmt::Display for ImportOpSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.format(OutputMode::Concise))\n }\n}\n\n/// Transform data using a given operator.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransformOpSpec 
{\n pub inputs: Vec,\n pub op: OpSpec,\n}\n\nimpl SpecFormatter for TransformOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let inputs = self\n .inputs\n .iter()\n .map(ToString::to_string)\n .collect::>()\n .join(\",\");\n let op_str = self.op.format(mode);\n match mode {\n OutputMode::Concise => format!(\"op={op_str}, inputs={inputs}\"),\n OutputMode::Verbose => format!(\"op={op_str}, inputs=[{inputs}]\"),\n }\n }\n}\n\n/// Apply reactive operations to each row of the input field.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ForEachOpSpec {\n /// Mapping that provides a table to apply reactive operations to.\n pub field_path: FieldPath,\n pub op_scope: ReactiveOpScope,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl ForEachOpSpec {\n pub fn get_label(&self) -> String {\n format!(\"Loop over {}\", self.field_path)\n }\n}\n\nimpl SpecFormatter for ForEachOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.get_label(),\n OutputMode::Verbose => format!(\"field={}\", self.field_path),\n }\n }\n}\n\n/// Emit data to a given collector at the given scope.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectOpSpec {\n /// Field values to be collected.\n pub input: StructMapping,\n /// Scope for the collector.\n pub scope_name: ScopeName,\n /// Name of the collector.\n pub collector_name: FieldName,\n /// If specified, the collector will have an automatically generated UUID field with the given name.\n /// The uuid will remain stable when collected input values remain unchanged.\n pub auto_uuid_field: Option,\n}\n\nimpl SpecFormatter for CollectOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let uuid = self.auto_uuid_field.as_deref().unwrap_or(\"none\");\n match mode {\n OutputMode::Concise => {\n format!(\n \"collector={}, input={}, uuid={}\",\n self.collector_name, self.input, uuid\n )\n }\n OutputMode::Verbose => {\n format!(\n 
\"scope={}, collector={}, input=[{}], uuid={}\",\n self.scope_name, self.collector_name, self.input, uuid\n )\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum VectorSimilarityMetric {\n CosineSimilarity,\n L2Distance,\n InnerProduct,\n}\n\nimpl fmt::Display for VectorSimilarityMetric {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n match self {\n VectorSimilarityMetric::CosineSimilarity => write!(f, \"Cosine\"),\n VectorSimilarityMetric::L2Distance => write!(f, \"L2\"),\n VectorSimilarityMetric::InnerProduct => write!(f, \"InnerProduct\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct VectorIndexDef {\n pub field_name: FieldName,\n pub metric: VectorSimilarityMetric,\n}\n\nimpl fmt::Display for VectorIndexDef {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}:{}\", self.field_name, self.metric)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct IndexOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub primary_key_fields: Option>,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n pub vector_indexes: Vec,\n}\n\nimpl IndexOptions {\n pub fn primary_key_fields(&self) -> Result<&[FieldName]> {\n Ok(self\n .primary_key_fields\n .as_ref()\n .ok_or(api_error!(\"Primary key fields are not set\"))?\n .as_ref())\n }\n}\n\nimpl fmt::Display for IndexOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let primary_keys = self\n .primary_key_fields\n .as_ref()\n .map(|p| p.join(\",\"))\n .unwrap_or_default();\n let vector_indexes = self\n .vector_indexes\n .iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\",\");\n write!(f, \"keys={primary_keys}, indexes={vector_indexes}\")\n }\n}\n\n/// Store data to a given sink.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ExportOpSpec {\n pub collector_name: FieldName,\n pub target: OpSpec,\n pub 
index_options: IndexOptions,\n pub setup_by_user: bool,\n}\n\nimpl SpecFormatter for ExportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let target_str = self.target.format(mode);\n let base = format!(\n \"collector={}, target={}, {}\",\n self.collector_name, target_str, self.index_options\n );\n match mode {\n OutputMode::Concise => base,\n OutputMode::Verbose => format!(\"{}, setup_by_user={}\", base, self.setup_by_user),\n }\n }\n}\n\n/// A reactive operation reacts on given input values.\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"action\")]\npub enum ReactiveOpSpec {\n Transform(TransformOpSpec),\n ForEach(ForEachOpSpec),\n Collect(CollectOpSpec),\n}\n\nimpl SpecFormatter for ReactiveOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match self {\n ReactiveOpSpec::Transform(t) => format!(\"Transform: {}\", t.format(mode)),\n ReactiveOpSpec::ForEach(fe) => match mode {\n OutputMode::Concise => fe.get_label().to_string(),\n OutputMode::Verbose => format!(\"ForEach: {}\", fe.format(mode)),\n },\n ReactiveOpSpec::Collect(c) => format!(\"Collect: {}\", c.format(mode)),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReactiveOpScope {\n pub name: ScopeName,\n pub ops: Vec>,\n // TODO: Suport collectors\n}\n\nimpl fmt::Display for ReactiveOpScope {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"Scope: name={}\", self.name)\n }\n}\n\n/// A flow defines the rule to sync data from given sources to given sinks with given transformations.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowInstanceSpec {\n /// Name of the flow instance.\n pub name: String,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub import_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub reactive_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub export_ops: Vec>,\n\n 
#[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub declarations: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransientFlowSpec {\n pub name: String,\n pub input_fields: Vec,\n pub reactive_ops: Vec>,\n pub output_value: ValueMapping,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SimpleSemanticsQueryHandlerSpec {\n pub name: String,\n pub flow_instance_name: String,\n pub export_target_name: String,\n pub query_transform_flow: TransientFlowSpec,\n pub default_similarity_metric: VectorSimilarityMetric,\n}\n\npub struct AuthEntryReference {\n pub key: String,\n _phantom: std::marker::PhantomData,\n}\n\nimpl fmt::Debug for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl fmt::Display for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl Clone for AuthEntryReference {\n fn clone(&self) -> Self {\n Self {\n key: self.key.clone(),\n _phantom: std::marker::PhantomData,\n }\n }\n}\n\n#[derive(Serialize, Deserialize)]\nstruct UntypedAuthEntryReference {\n key: T,\n}\n\nimpl Serialize for AuthEntryReference {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n UntypedAuthEntryReference { key: &self.key }.serialize(serializer)\n }\n}\n\nimpl<'de, T> Deserialize<'de> for AuthEntryReference {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let untyped_ref = UntypedAuthEntryReference::::deserialize(deserializer)?;\n Ok(AuthEntryReference {\n key: untyped_ref.key,\n _phantom: std::marker::PhantomData,\n })\n }\n}\n\nimpl PartialEq for AuthEntryReference {\n fn eq(&self, other: &Self) -> bool {\n self.key == other.key\n }\n}\n\nimpl Eq for AuthEntryReference {}\n\nimpl std::hash::Hash for AuthEntryReference {\n fn hash(&self, state: &mut H) 
{\n self.key.hash(state);\n }\n}\n"], ["/cocoindex/src/ops/sources/google_drive.rs", "use chrono::Duration;\nuse google_drive3::{\n DriveHub,\n api::{File, Scope},\n yup_oauth2::{ServiceAccountAuthenticator, read_service_account_key},\n};\nuse http_body_util::BodyExt;\nuse hyper_rustls::HttpsConnector;\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse phf::phf_map;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\nstruct ExportMimeType {\n text: &'static str,\n binary: &'static str,\n}\n\nconst FOLDER_MIME_TYPE: &str = \"application/vnd.google-apps.folder\";\nconst FILE_MIME_TYPE: &str = \"application/vnd.google-apps.file\";\nstatic EXPORT_MIME_TYPES: phf::Map<&'static str, ExportMimeType> = phf_map! {\n \"application/vnd.google-apps.document\" =>\n ExportMimeType {\n text: \"text/markdown\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.spreadsheet\" =>\n ExportMimeType {\n text: \"text/csv\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.presentation\" =>\n ExportMimeType {\n text: \"text/plain\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.drawing\" =>\n ExportMimeType {\n text: \"image/svg+xml\",\n binary: \"image/png\",\n },\n \"application/vnd.google-apps.script\" =>\n ExportMimeType {\n text: \"application/vnd.google-apps.script+json\",\n binary: \"application/vnd.google-apps.script+json\",\n },\n};\n\nfn is_supported_file_type(mime_type: &str) -> bool {\n !mime_type.starts_with(\"application/vnd.google-apps.\")\n || EXPORT_MIME_TYPES.contains_key(mime_type)\n || mime_type == FILE_MIME_TYPE\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n service_account_credential_path: String,\n binary: bool,\n root_folder_ids: Vec,\n recent_changes_poll_interval: Option,\n}\n\nstruct Executor {\n drive_hub: DriveHub>,\n binary: bool,\n root_folder_ids: IndexSet>,\n recent_updates_poll_interval: Option,\n}\n\nimpl Executor {\n async fn new(spec: Spec) -> Result {\n let 
service_account_key =\n read_service_account_key(spec.service_account_credential_path).await?;\n let auth = ServiceAccountAuthenticator::builder(service_account_key)\n .build()\n .await?;\n let client =\n hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())\n .build(\n hyper_rustls::HttpsConnectorBuilder::new()\n .with_provider_and_native_roots(\n rustls::crypto::aws_lc_rs::default_provider(),\n )?\n .https_only()\n .enable_http2()\n .build(),\n );\n let drive_hub = DriveHub::new(client, auth);\n Ok(Self {\n drive_hub,\n binary: spec.binary,\n root_folder_ids: spec.root_folder_ids.into_iter().map(Arc::from).collect(),\n recent_updates_poll_interval: spec.recent_changes_poll_interval,\n })\n }\n}\n\nfn escape_string(s: &str) -> String {\n let mut escaped = String::with_capacity(s.len());\n for c in s.chars() {\n match c {\n '\\'' | '\\\\' => escaped.push('\\\\'),\n _ => {}\n }\n escaped.push(c);\n }\n escaped\n}\n\nconst CUTOFF_TIME_BUFFER: Duration = Duration::seconds(1);\nimpl Executor {\n fn visit_file(\n &self,\n file: File,\n new_folder_ids: &mut Vec>,\n seen_ids: &mut HashSet>,\n ) -> Result> {\n if file.trashed == Some(true) {\n return Ok(None);\n }\n let (id, mime_type) = match (file.id, file.mime_type) {\n (Some(id), Some(mime_type)) => (Arc::::from(id), mime_type),\n (id, mime_type) => {\n warn!(\"Skipping file with incomplete metadata: id={id:?}, mime_type={mime_type:?}\",);\n return Ok(None);\n }\n };\n if !seen_ids.insert(id.clone()) {\n return Ok(None);\n }\n let result = if mime_type == FOLDER_MIME_TYPE {\n new_folder_ids.push(id);\n None\n } else if is_supported_file_type(&mime_type) {\n Some(PartialSourceRowMetadata {\n key: KeyValue::Str(id),\n ordinal: file.modified_time.map(|t| t.try_into()).transpose()?,\n })\n } else {\n None\n };\n Ok(result)\n }\n\n async fn list_files(\n &self,\n folder_id: &str,\n fields: &str,\n next_page_token: &mut Option,\n ) -> Result> {\n let query = format!(\"'{}' in parents\", 
escape_string(folder_id));\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .q(&query)\n .param(\"fields\", fields);\n if let Some(next_page_token) = &next_page_token {\n list_call = list_call.page_token(next_page_token);\n }\n let (_, files) = list_call.doit().await?;\n *next_page_token = files.next_page_token;\n let file_iter = files.files.into_iter().flat_map(|file| file.into_iter());\n Ok(file_iter)\n }\n\n fn make_cutoff_time(\n most_recent_modified_time: Option>,\n list_start_time: DateTime,\n ) -> DateTime {\n let safe_upperbound = list_start_time - CUTOFF_TIME_BUFFER;\n most_recent_modified_time\n .map(|t| t.min(safe_upperbound))\n .unwrap_or(safe_upperbound)\n }\n\n async fn get_recent_updates(\n &self,\n cutoff_time: &mut DateTime,\n ) -> Result {\n let mut page_size: i32 = 10;\n let mut next_page_token: Option = None;\n let mut changes = Vec::new();\n let mut most_recent_modified_time = None;\n let start_time = Utc::now();\n 'paginate: loop {\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"files(id,modifiedTime,parents,trashed)\")\n .order_by(\"modifiedTime desc\")\n .page_size(page_size);\n if let Some(token) = next_page_token {\n list_call = list_call.page_token(token.as_str());\n }\n let (_, files) = list_call.doit().await?;\n for file in files.files.into_iter().flat_map(|files| files.into_iter()) {\n let modified_time = file.modified_time.unwrap_or_default();\n if most_recent_modified_time.is_none() {\n most_recent_modified_time = Some(modified_time);\n }\n if modified_time <= *cutoff_time {\n break 'paginate;\n }\n let file_id = file.id.ok_or_else(|| anyhow!(\"File has no id\"))?;\n if self.is_file_covered(&file_id).await? 
{\n changes.push(SourceChange {\n key: KeyValue::Str(Arc::from(file_id)),\n data: None,\n });\n }\n }\n if let Some(token) = files.next_page_token {\n next_page_token = Some(token);\n } else {\n break;\n }\n // List more in a page since 2nd.\n page_size = 100;\n }\n *cutoff_time = Self::make_cutoff_time(most_recent_modified_time, start_time);\n Ok(SourceChangeMessage {\n changes,\n ack_fn: None,\n })\n }\n\n async fn is_file_covered(&self, file_id: &str) -> Result {\n let mut next_file_id = Some(Cow::Borrowed(file_id));\n while let Some(file_id) = next_file_id {\n if self.root_folder_ids.contains(file_id.as_ref()) {\n return Ok(true);\n }\n let (_, file) = self\n .drive_hub\n .files()\n .get(&file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"parents\")\n .doit()\n .await?;\n next_file_id = file\n .parents\n .into_iter()\n .flat_map(|parents| parents.into_iter())\n .map(Cow::Owned)\n .next();\n }\n Ok(false)\n }\n}\n\ntrait ResultExt {\n type OptResult;\n fn or_not_found(self) -> Self::OptResult;\n}\n\nimpl ResultExt for google_drive3::Result {\n type OptResult = google_drive3::Result>;\n\n fn or_not_found(self) -> Self::OptResult {\n match self {\n Ok(value) => Ok(Some(value)),\n Err(google_drive3::Error::BadRequest(err_msg))\n if err_msg\n .get(\"error\")\n .and_then(|e| e.get(\"code\"))\n .and_then(|code| code.as_i64())\n == Some(404) =>\n {\n Ok(None)\n }\n Err(e) => Err(e),\n }\n }\n}\n\nfn optional_modified_time(include_ordinal: bool) -> &'static str {\n if include_ordinal { \",modifiedTime\" } else { \"\" }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let mut seen_ids = HashSet::new();\n let mut folder_ids = self.root_folder_ids.clone();\n let fields = format!(\n \"files(id,name,mimeType,trashed{})\",\n optional_modified_time(options.include_ordinal)\n );\n let mut new_folder_ids = Vec::new();\n try_stream! 
{\n while let Some(folder_id) = folder_ids.pop() {\n let mut next_page_token = None;\n loop {\n let mut curr_rows = Vec::new();\n let files = self\n .list_files(&folder_id, &fields, &mut next_page_token)\n .await?;\n for file in files {\n curr_rows.extend(self.visit_file(file, &mut new_folder_ids, &mut seen_ids)?);\n }\n if !curr_rows.is_empty() {\n yield curr_rows;\n }\n if next_page_token.is_none() {\n break;\n }\n }\n folder_ids.extend(new_folder_ids.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let file_id = key.str_value()?;\n let fields = format!(\n \"id,name,mimeType,trashed{}\",\n optional_modified_time(options.include_ordinal)\n );\n let resp = self\n .drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", &fields)\n .doit()\n .await\n .or_not_found()?;\n let file = match resp {\n Some((_, file)) if file.trashed != Some(true) => file,\n _ => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n let ordinal = if options.include_ordinal {\n file.modified_time.map(|t| t.try_into()).transpose()?\n } else {\n None\n };\n let type_n_body = if let Some(export_mime_type) = file\n .mime_type\n .as_ref()\n .and_then(|mime_type| EXPORT_MIME_TYPES.get(mime_type.as_str()))\n {\n let target_mime_type = if self.binary {\n export_mime_type.binary\n } else {\n export_mime_type.text\n };\n self.drive_hub\n .files()\n .export(file_id, target_mime_type)\n .add_scope(Scope::Readonly)\n .doit()\n .await\n .or_not_found()?\n .map(|content| (Some(target_mime_type.to_string()), content.into_body()))\n } else {\n self.drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"alt\", \"media\")\n .doit()\n .await\n .or_not_found()?\n .map(|(resp, _)| (file.mime_type, resp.into_body()))\n };\n let value = match type_n_body {\n Some((mime_type, 
resp_body)) => {\n let content = resp_body.collect().await?;\n\n let fields = vec![\n file.name.unwrap_or_default().into(),\n mime_type.into(),\n if self.binary {\n content.to_bytes().to_vec().into()\n } else {\n String::from_utf8_lossy(&content.to_bytes())\n .to_string()\n .into()\n },\n ];\n Some(SourceValue::Existence(FieldValues { fields }))\n }\n None => None,\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let poll_interval = if let Some(poll_interval) = self.recent_updates_poll_interval {\n poll_interval\n } else {\n return Ok(None);\n };\n let mut cutoff_time = Utc::now() - CUTOFF_TIME_BUFFER;\n let mut interval = tokio::time::interval(poll_interval);\n interval.tick().await;\n let stream = stream! {\n loop {\n interval.tick().await;\n yield self.get_recent_updates(&mut cutoff_time).await;\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"GoogleDrive\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"file_id\",\n make_output_type(BasicValueType::Str),\n ));\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n let mime_type_field = schema_builder.add_field(FieldSchema::new(\n \"mime_type\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n )\n .with_attr(\n field_attrs::CONTENT_MIME_TYPE,\n 
serde_json::to_value(mime_type_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec).await?))\n }\n}\n"], ["/cocoindex/src/server.rs", "use crate::prelude::*;\n\nuse crate::{lib_context::LibContext, service};\nuse axum::{Router, routing};\nuse tower::ServiceBuilder;\nuse tower_http::{\n cors::{AllowOrigin, CorsLayer},\n trace::TraceLayer,\n};\n\n#[derive(Deserialize, Debug)]\npub struct ServerSettings {\n pub address: String,\n #[serde(default)]\n pub cors_origins: Vec,\n}\n\n/// Initialize the server and return a future that will actually handle requests.\npub async fn init_server(\n lib_context: Arc,\n settings: ServerSettings,\n) -> Result> {\n let mut cors = CorsLayer::default();\n if !settings.cors_origins.is_empty() {\n let origins: Vec<_> = settings\n .cors_origins\n .iter()\n .map(|origin| origin.parse())\n .collect::>()?;\n cors = cors\n .allow_origin(AllowOrigin::list(origins))\n .allow_methods([\n axum::http::Method::GET,\n axum::http::Method::POST,\n axum::http::Method::DELETE,\n ])\n .allow_headers([axum::http::header::CONTENT_TYPE]);\n }\n let app = Router::new()\n .route(\n \"/cocoindex\",\n routing::get(|| async { \"CocoIndex is running!\" }),\n )\n .nest(\n \"/cocoindex/api\",\n Router::new()\n .route(\"/flows\", routing::get(service::flows::list_flows))\n .route(\n \"/flows/{flowInstName}\",\n routing::get(service::flows::get_flow),\n )\n .route(\n \"/flows/{flowInstName}/schema\",\n routing::get(service::flows::get_flow_schema),\n )\n .route(\n \"/flows/{flowInstName}/keys\",\n routing::get(service::flows::get_keys),\n )\n .route(\n \"/flows/{flowInstName}/data\",\n routing::get(service::flows::evaluate_data),\n )\n .route(\n \"/flows/{flowInstName}/rowStatus\",\n routing::get(service::flows::get_row_indexing_status),\n )\n .route(\n 
\"/flows/{flowInstName}/update\",\n routing::post(service::flows::update),\n )\n .layer(\n ServiceBuilder::new()\n .layer(TraceLayer::new_for_http())\n .layer(cors),\n )\n .with_state(lib_context.clone()),\n );\n\n let listener = tokio::net::TcpListener::bind(&settings.address)\n .await\n .context(format!(\"Failed to bind to address: {}\", settings.address))?;\n\n println!(\n \"Server running at http://{}/cocoindex\",\n listener.local_addr()?\n );\n let serve_fut = async { axum::serve(listener, app).await.unwrap() };\n Ok(serve_fut.boxed())\n}\n"], ["/cocoindex/src/ops/sources/azure_blob.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse azure_core::prelude::NextMarker;\nuse azure_identity::{DefaultAzureCredential, TokenCredentialOptions};\nuse azure_storage::StorageCredentials;\nuse azure_storage_blobs::prelude::*;\nuse futures::StreamExt;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n account_name: String,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n\n /// SAS token for authentication. Takes precedence over account_access_key.\n sas_token: Option>,\n /// Account access key for authentication. 
If not provided, will use default Azure credential.\n account_access_key: Option>,\n}\n\nstruct Executor {\n client: BlobServiceClient,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &time::OffsetDateTime) -> Ordinal {\n Ordinal(Some(dt.unix_timestamp_nanos() as i64 / 1000))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token: Option = None;\n loop {\n let mut list_builder = self.client\n .container_client(&self.container_name)\n .list_blobs();\n\n if let Some(p) = &self.prefix {\n list_builder = list_builder.prefix(p.clone());\n }\n\n if let Some(token) = continuation_token.take() {\n list_builder = list_builder.marker(token);\n }\n\n let mut page_stream = list_builder.into_stream();\n let Some(page_result) = page_stream.next().await else {\n break;\n };\n\n let page = page_result?;\n let mut batch = Vec::new();\n\n for blob in page.blobs.blobs() {\n let key = &blob.name;\n\n // Only include files (not directories)\n if key.ends_with('/') { continue; }\n\n if self.is_file_included(key) {\n let ordinal = Some(datetime_to_ordinal(&blob.properties.last_modified));\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.clone().into()),\n ordinal,\n });\n }\n }\n\n if !batch.is_empty() {\n yield batch;\n }\n\n continuation_token = page.next_marker;\n if continuation_token.is_none() {\n break;\n }\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: 
&SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n\n let blob_client = self\n .client\n .container_client(&self.container_name)\n .blob_client(key_str.as_ref());\n\n let mut stream = blob_client.get().into_stream();\n let result = stream.next().await;\n\n let blob_response = match result {\n Some(response) => response?,\n None => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n\n let ordinal = if options.include_ordinal {\n Some(datetime_to_ordinal(\n &blob_response.blob.properties.last_modified,\n ))\n } else {\n None\n };\n\n let value = if options.include_value {\n let bytes = blob_response.data.collect().await?;\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes)\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n // Azure Blob Storage doesn't have built-in change notifications like S3+SQS\n Ok(None)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AzureBlob\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n 
serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n context: Arc,\n ) -> Result> {\n let credential = if let Some(sas_token) = spec.sas_token {\n let sas_token = context.auth_registry.get(&sas_token)?;\n StorageCredentials::sas_token(sas_token)?\n } else if let Some(account_access_key) = spec.account_access_key {\n let account_access_key = context.auth_registry.get(&account_access_key)?;\n StorageCredentials::access_key(spec.account_name.clone(), account_access_key)\n } else {\n let default_credential = Arc::new(DefaultAzureCredential::create(\n TokenCredentialOptions::default(),\n )?);\n StorageCredentials::token_credential(default_credential)\n };\n\n let client = BlobServiceClient::new(&spec.account_name, credential);\n Ok(Box::new(Executor {\n client,\n container_name: spec.container_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/ops/functions/split_recursively.rs", "use anyhow::anyhow;\nuse log::{error, trace};\nuse regex::{Matches, Regex};\nuse std::collections::HashSet;\nuse std::sync::LazyLock;\nuse std::{collections::HashMap, sync::Arc};\nuse unicase::UniCase;\n\nuse crate::base::field_attrs;\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Deserialize)]\nstruct CustomLanguageSpec {\n language_name: String,\n #[serde(default)]\n aliases: Vec,\n separators_regex: Vec,\n}\n\n#[derive(Deserialize)]\nstruct Spec {\n #[serde(default)]\n custom_languages: 
Vec,\n}\n\nconst SYNTAX_LEVEL_GAP_COST: usize = 512;\nconst MISSING_OVERLAP_COST: usize = 512;\nconst PER_LINE_BREAK_LEVEL_GAP_COST: usize = 64;\nconst TOO_SMALL_CHUNK_COST: usize = 1048576;\n\npub struct Args {\n text: ResolvedOpArg,\n chunk_size: ResolvedOpArg,\n min_chunk_size: Option,\n chunk_overlap: Option,\n language: Option,\n}\n\nstruct SimpleLanguageConfig {\n name: String,\n aliases: Vec,\n separator_regex: Vec,\n}\n\nstatic DEFAULT_LANGUAGE_CONFIG: LazyLock =\n LazyLock::new(|| SimpleLanguageConfig {\n name: \"_DEFAULT\".to_string(),\n aliases: vec![],\n separator_regex: [r\"\\n\\n+\", r\"\\n\", r\"\\s+\"]\n .into_iter()\n .map(|s| Regex::new(s).unwrap())\n .collect(),\n });\n\nstruct TreesitterLanguageConfig {\n name: String,\n tree_sitter_lang: tree_sitter::Language,\n terminal_node_kind_ids: HashSet,\n}\n\nfn add_treesitter_language<'a>(\n output: &'a mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n lang_fn: impl Into,\n terminal_node_kinds: impl IntoIterator,\n) {\n let tree_sitter_lang: tree_sitter::Language = lang_fn.into();\n let terminal_node_kind_ids = terminal_node_kinds\n .into_iter()\n .filter_map(|kind| {\n let id = tree_sitter_lang.id_for_node_kind(kind, true);\n if id != 0 {\n trace!(\"Got id for node kind: `{kind}` -> {id}\");\n Some(id)\n } else {\n error!(\"Failed in getting id for node kind: `{kind}`\");\n None\n }\n })\n .collect();\n\n let config = Arc::new(TreesitterLanguageConfig {\n name: name.to_string(),\n tree_sitter_lang,\n terminal_node_kind_ids,\n });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nstatic TREE_SITTER_LANGUAGE_BY_LANG: LazyLock<\n HashMap, Arc>,\n> = LazyLock::new(|| {\n let mut map = HashMap::new();\n add_treesitter_language(&mut map, \"C\", [\".c\"], tree_sitter_c::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"C++\",\n [\".cpp\", 
\".cc\", \".cxx\", \".h\", \".hpp\", \"cpp\"],\n tree_sitter_c::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"C#\",\n [\".cs\", \"cs\", \"csharp\"],\n tree_sitter_c_sharp::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"CSS\",\n [\".css\", \".scss\"],\n tree_sitter_css::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Fortran\",\n [\".f\", \".f90\", \".f95\", \".f03\", \"f\", \"f90\", \"f95\", \"f03\"],\n tree_sitter_fortran::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Go\",\n [\".go\", \"golang\"],\n tree_sitter_go::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"HTML\",\n [\".html\", \".htm\"],\n tree_sitter_html::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"Java\", [\".java\"], tree_sitter_java::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"JavaScript\",\n [\".js\", \"js\"],\n tree_sitter_javascript::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"JSON\", [\".json\"], tree_sitter_json::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Kotlin\",\n [\".kt\", \".kts\"],\n tree_sitter_kotlin_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Markdown\",\n [\".md\", \".mdx\", \"md\"],\n tree_sitter_md::LANGUAGE,\n [\"inline\"],\n );\n add_treesitter_language(\n &mut map,\n \"Pascal\",\n [\".pas\", \"pas\", \".dpr\", \"dpr\", \"Delphi\"],\n tree_sitter_pascal::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"PHP\", [\".php\"], tree_sitter_php::LANGUAGE_PHP, []);\n add_treesitter_language(\n &mut map,\n \"Python\",\n [\".py\"],\n tree_sitter_python::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"R\", [\".r\"], tree_sitter_r::LANGUAGE, []);\n add_treesitter_language(&mut map, \"Ruby\", [\".rb\"], tree_sitter_ruby::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Rust\",\n [\".rs\", \"rs\"],\n tree_sitter_rust::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Scala\",\n 
[\".scala\"],\n tree_sitter_scala::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"SQL\", [\".sql\"], tree_sitter_sequel::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Swift\",\n [\".swift\"],\n tree_sitter_swift::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TOML\",\n [\".toml\"],\n tree_sitter_toml_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TSX\",\n [\".tsx\"],\n tree_sitter_typescript::LANGUAGE_TSX,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TypeScript\",\n [\".ts\", \"ts\"],\n tree_sitter_typescript::LANGUAGE_TYPESCRIPT,\n [],\n );\n add_treesitter_language(&mut map, \"XML\", [\".xml\"], tree_sitter_xml::LANGUAGE_XML, []);\n add_treesitter_language(&mut map, \"DTD\", [\".dtd\"], tree_sitter_xml::LANGUAGE_DTD, []);\n add_treesitter_language(\n &mut map,\n \"YAML\",\n [\".yaml\", \".yml\"],\n tree_sitter_yaml::LANGUAGE,\n [],\n );\n map\n});\n\nenum ChunkKind<'t> {\n TreeSitterNode {\n lang_config: &'t TreesitterLanguageConfig,\n node: tree_sitter::Node<'t>,\n },\n RegexpSepChunk {\n lang_config: &'t SimpleLanguageConfig,\n next_regexp_sep_id: usize,\n },\n}\n\nstruct Chunk<'t, 's: 't> {\n full_text: &'s str,\n range: RangeValue,\n kind: ChunkKind<'t>,\n}\n\nimpl<'t, 's: 't> Chunk<'t, 's> {\n fn text(&self) -> &'s str {\n self.range.extract_str(self.full_text)\n }\n}\n\nstruct TextChunksIter<'t, 's: 't> {\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n matches_iter: Matches<'t, 's>,\n regexp_sep_id: usize,\n next_start_pos: Option,\n}\n\nimpl<'t, 's: 't> TextChunksIter<'t, 's> {\n fn new(\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n regexp_sep_id: usize,\n ) -> Self {\n Self {\n lang_config,\n parent,\n matches_iter: lang_config.separator_regex[regexp_sep_id].find_iter(parent.text()),\n regexp_sep_id,\n next_start_pos: Some(parent.range.start),\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TextChunksIter<'t, 's> {\n type Item = Chunk<'t, 
's>;\n\n fn next(&mut self) -> Option {\n let start_pos = self.next_start_pos?;\n let end_pos = match self.matches_iter.next() {\n Some(grp) => {\n self.next_start_pos = Some(self.parent.range.start + grp.end());\n self.parent.range.start + grp.start()\n }\n None => {\n self.next_start_pos = None;\n if start_pos >= self.parent.range.end {\n return None;\n }\n self.parent.range.end\n }\n };\n Some(Chunk {\n full_text: self.parent.full_text,\n range: RangeValue::new(start_pos, end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: self.lang_config,\n next_regexp_sep_id: self.regexp_sep_id + 1,\n },\n })\n }\n}\n\nstruct TreeSitterNodeIter<'t, 's: 't> {\n lang_config: &'t TreesitterLanguageConfig,\n full_text: &'s str,\n cursor: Option>,\n next_start_pos: usize,\n end_pos: usize,\n}\n\nimpl<'t, 's: 't> TreeSitterNodeIter<'t, 's> {\n fn fill_gap(\n next_start_pos: &mut usize,\n gap_end_pos: usize,\n full_text: &'s str,\n ) -> Option> {\n let start_pos = *next_start_pos;\n if start_pos < gap_end_pos {\n *next_start_pos = gap_end_pos;\n Some(Chunk {\n full_text,\n range: RangeValue::new(start_pos, gap_end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n })\n } else {\n None\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TreeSitterNodeIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let cursor = if let Some(cursor) = &mut self.cursor {\n cursor\n } else {\n return Self::fill_gap(&mut self.next_start_pos, self.end_pos, self.full_text);\n };\n let node = cursor.node();\n if let Some(gap) =\n Self::fill_gap(&mut self.next_start_pos, node.start_byte(), self.full_text)\n {\n return Some(gap);\n }\n if !cursor.goto_next_sibling() {\n self.cursor = None;\n }\n self.next_start_pos = node.end_byte();\n Some(Chunk {\n full_text: self.full_text,\n range: RangeValue::new(node.start_byte(), node.end_byte()),\n kind: ChunkKind::TreeSitterNode {\n lang_config: self.lang_config,\n node,\n 
},\n })\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]\nenum LineBreakLevel {\n Inline,\n Newline,\n DoubleNewline,\n}\n\nimpl LineBreakLevel {\n fn ord(self) -> usize {\n match self {\n LineBreakLevel::Inline => 0,\n LineBreakLevel::Newline => 1,\n LineBreakLevel::DoubleNewline => 2,\n }\n }\n}\n\nfn line_break_level(c: &str) -> LineBreakLevel {\n let mut lb_level = LineBreakLevel::Inline;\n let mut iter = c.chars();\n while let Some(c) = iter.next() {\n if c == '\\n' || c == '\\r' {\n lb_level = LineBreakLevel::Newline;\n for c2 in iter.by_ref() {\n if c2 == '\\n' || c2 == '\\r' {\n if c == c2 {\n return LineBreakLevel::DoubleNewline;\n }\n } else {\n break;\n }\n }\n }\n }\n lb_level\n}\n\nconst INLINE_SPACE_CHARS: [char; 2] = [' ', '\\t'];\n\nstruct AtomChunk {\n range: RangeValue,\n boundary_syntax_level: usize,\n\n internal_lb_level: LineBreakLevel,\n boundary_lb_level: LineBreakLevel,\n}\n\nstruct AtomChunksCollector<'s> {\n full_text: &'s str,\n\n curr_level: usize,\n min_level: usize,\n atom_chunks: Vec,\n}\nimpl<'s> AtomChunksCollector<'s> {\n fn collect(&mut self, range: RangeValue) {\n // Trim trailing whitespaces.\n let end_trimmed_text = &self.full_text[range.start..range.end].trim_end();\n if end_trimmed_text.is_empty() {\n return;\n }\n\n // Trim leading whitespaces.\n let trimmed_text = end_trimmed_text.trim_start();\n let new_start = range.start + (end_trimmed_text.len() - trimmed_text.len());\n let new_end = new_start + trimmed_text.len();\n\n // Align to beginning of the line if possible.\n let prev_end = self.atom_chunks.last().map_or(0, |chunk| chunk.range.end);\n let gap = &self.full_text[prev_end..new_start];\n let boundary_lb_level = line_break_level(gap);\n let range = if boundary_lb_level != LineBreakLevel::Inline {\n let trimmed_gap = gap.trim_end_matches(INLINE_SPACE_CHARS);\n RangeValue::new(prev_end + trimmed_gap.len(), new_end)\n } else {\n RangeValue::new(new_start, new_end)\n };\n\n 
self.atom_chunks.push(AtomChunk {\n range,\n boundary_syntax_level: self.min_level,\n internal_lb_level: line_break_level(trimmed_text),\n boundary_lb_level,\n });\n self.min_level = self.curr_level;\n }\n\n fn into_atom_chunks(mut self) -> Vec {\n self.atom_chunks.push(AtomChunk {\n range: RangeValue::new(self.full_text.len(), self.full_text.len()),\n boundary_syntax_level: self.min_level,\n internal_lb_level: LineBreakLevel::Inline,\n boundary_lb_level: LineBreakLevel::DoubleNewline,\n });\n self.atom_chunks\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct OutputPosition {\n char_offset: usize,\n line: u32,\n column: u32,\n}\n\nimpl OutputPosition {\n fn into_output(self) -> value::Value {\n value::Value::Struct(fields_value!(\n self.char_offset as i64,\n self.line as i64,\n self.column as i64\n ))\n }\n}\nstruct Position {\n byte_offset: usize,\n output: Option,\n}\n\nimpl Position {\n fn new(byte_offset: usize) -> Self {\n Self {\n byte_offset,\n output: None,\n }\n }\n}\n\nstruct ChunkOutput<'s> {\n start_pos: Position,\n end_pos: Position,\n text: &'s str,\n}\n\nstruct RecursiveChunker<'s> {\n full_text: &'s str,\n chunk_size: usize,\n chunk_overlap: usize,\n min_chunk_size: usize,\n}\n\nimpl<'t, 's: 't> RecursiveChunker<'s> {\n fn collect_atom_chunks_from_iter(\n &self,\n sub_chunks_iter: impl Iterator>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n atom_collector.curr_level += 1;\n for sub_chunk in sub_chunks_iter {\n let range = sub_chunk.range;\n if range.len() <= self.min_chunk_size {\n atom_collector.collect(range);\n } else {\n self.collect_atom_chunks(sub_chunk, atom_collector)?;\n }\n }\n atom_collector.curr_level -= 1;\n if atom_collector.curr_level < atom_collector.min_level {\n atom_collector.min_level = atom_collector.curr_level;\n }\n Ok(())\n }\n\n fn collect_atom_chunks(\n &self,\n chunk: Chunk<'t, 's>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n match chunk.kind {\n 
ChunkKind::TreeSitterNode { lang_config, node } => {\n if !lang_config.terminal_node_kind_ids.contains(&node.kind_id()) {\n let mut cursor = node.walk();\n if cursor.goto_first_child() {\n return self.collect_atom_chunks_from_iter(\n TreeSitterNodeIter {\n lang_config,\n full_text: self.full_text,\n cursor: Some(cursor),\n next_start_pos: node.start_byte(),\n end_pos: node.end_byte(),\n },\n atom_collector,\n );\n }\n }\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: chunk.range,\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n },\n atom_collector,\n )\n }\n ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id,\n } => {\n if next_regexp_sep_id >= lang_config.separator_regex.len() {\n atom_collector.collect(chunk.range);\n Ok(())\n } else {\n self.collect_atom_chunks_from_iter(\n TextChunksIter::new(lang_config, &chunk, next_regexp_sep_id),\n atom_collector,\n )\n }\n }\n }\n }\n\n fn get_overlap_cost_base(&self, offset: usize) -> usize {\n if self.chunk_overlap == 0 {\n 0\n } else {\n (self.full_text.len() - offset) * MISSING_OVERLAP_COST / self.chunk_overlap\n }\n }\n\n fn merge_atom_chunks(&self, atom_chunks: Vec) -> Vec> {\n struct AtomRoutingPlan {\n start_idx: usize, // index of `atom_chunks` for the start chunk\n prev_plan_idx: usize, // index of `plans` for the previous plan\n cost: usize,\n overlap_cost_base: usize,\n }\n type PrevPlanCandidate = (std::cmp::Reverse, usize); // (cost, start_idx)\n\n let mut plans = Vec::with_capacity(atom_chunks.len());\n // Janitor\n plans.push(AtomRoutingPlan {\n start_idx: 0,\n prev_plan_idx: 0,\n cost: 0,\n overlap_cost_base: self.get_overlap_cost_base(0),\n });\n let mut prev_plan_candidates = std::collections::BinaryHeap::::new();\n\n let mut gap_cost_cache = vec![0];\n let mut syntax_level_gap_cost = |boundary: usize, internal: usize| -> usize {\n if boundary > internal {\n let gap = boundary - internal;\n for i in 
gap_cost_cache.len()..=gap {\n gap_cost_cache.push(gap_cost_cache[i - 1] + SYNTAX_LEVEL_GAP_COST / i);\n }\n gap_cost_cache[gap]\n } else {\n 0\n }\n };\n\n for (i, chunk) in atom_chunks[0..atom_chunks.len() - 1].iter().enumerate() {\n let mut min_cost = usize::MAX;\n let mut arg_min_start_idx: usize = 0;\n let mut arg_min_prev_plan_idx: usize = 0;\n let mut start_idx = i;\n\n let end_syntax_level = atom_chunks[i + 1].boundary_syntax_level;\n let end_lb_level = atom_chunks[i + 1].boundary_lb_level;\n\n let mut internal_syntax_level = usize::MAX;\n let mut internal_lb_level = LineBreakLevel::Inline;\n\n fn lb_level_gap(boundary: LineBreakLevel, internal: LineBreakLevel) -> usize {\n if boundary.ord() < internal.ord() {\n internal.ord() - boundary.ord()\n } else {\n 0\n }\n }\n loop {\n let start_chunk = &atom_chunks[start_idx];\n let chunk_size = chunk.range.end - start_chunk.range.start;\n\n let mut cost = 0;\n cost +=\n syntax_level_gap_cost(start_chunk.boundary_syntax_level, internal_syntax_level);\n cost += syntax_level_gap_cost(end_syntax_level, internal_syntax_level);\n cost += (lb_level_gap(start_chunk.boundary_lb_level, internal_lb_level)\n + lb_level_gap(end_lb_level, internal_lb_level))\n * PER_LINE_BREAK_LEVEL_GAP_COST;\n if chunk_size < self.min_chunk_size {\n cost += TOO_SMALL_CHUNK_COST;\n }\n\n if chunk_size > self.chunk_size {\n if min_cost == usize::MAX {\n min_cost = cost + plans[start_idx].cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = start_idx;\n }\n break;\n }\n\n let prev_plan_idx = if self.chunk_overlap > 0 {\n while let Some(top_prev_plan) = prev_plan_candidates.peek() {\n let overlap_size =\n atom_chunks[top_prev_plan.1].range.end - start_chunk.range.start;\n if overlap_size <= self.chunk_overlap {\n break;\n }\n prev_plan_candidates.pop();\n }\n prev_plan_candidates.push((\n std::cmp::Reverse(\n plans[start_idx].cost + plans[start_idx].overlap_cost_base,\n ),\n start_idx,\n ));\n prev_plan_candidates.peek().unwrap().1\n } 
else {\n start_idx\n };\n let prev_plan = &plans[prev_plan_idx];\n cost += prev_plan.cost;\n if self.chunk_overlap == 0 {\n cost += MISSING_OVERLAP_COST / 2;\n } else {\n let start_cost_base = self.get_overlap_cost_base(start_chunk.range.start);\n cost += if prev_plan.overlap_cost_base < start_cost_base {\n MISSING_OVERLAP_COST + prev_plan.overlap_cost_base - start_cost_base\n } else {\n MISSING_OVERLAP_COST\n };\n }\n if cost < min_cost {\n min_cost = cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = prev_plan_idx;\n }\n\n if start_idx == 0 {\n break;\n }\n\n start_idx -= 1;\n internal_syntax_level =\n internal_syntax_level.min(start_chunk.boundary_syntax_level);\n internal_lb_level = internal_lb_level.max(start_chunk.internal_lb_level);\n }\n plans.push(AtomRoutingPlan {\n start_idx: arg_min_start_idx,\n prev_plan_idx: arg_min_prev_plan_idx,\n cost: min_cost,\n overlap_cost_base: self.get_overlap_cost_base(chunk.range.end),\n });\n prev_plan_candidates.clear();\n }\n\n let mut output = Vec::new();\n let mut plan_idx = plans.len() - 1;\n while plan_idx > 0 {\n let plan = &plans[plan_idx];\n let start_chunk = &atom_chunks[plan.start_idx];\n let end_chunk = &atom_chunks[plan_idx - 1];\n output.push(ChunkOutput {\n start_pos: Position::new(start_chunk.range.start),\n end_pos: Position::new(end_chunk.range.end),\n text: &self.full_text[start_chunk.range.start..end_chunk.range.end],\n });\n plan_idx = plan.prev_plan_idx;\n }\n output.reverse();\n output\n }\n\n fn split_root_chunk(&self, kind: ChunkKind<'t>) -> Result>> {\n let mut atom_collector = AtomChunksCollector {\n full_text: self.full_text,\n min_level: 0,\n curr_level: 0,\n atom_chunks: Vec::new(),\n };\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: RangeValue::new(0, self.full_text.len()),\n kind,\n },\n &mut atom_collector,\n )?;\n let atom_chunks = atom_collector.into_atom_chunks();\n let output = self.merge_atom_chunks(atom_chunks);\n Ok(output)\n }\n}\n\nstruct 
Executor {\n args: Args,\n custom_languages: HashMap, Arc>,\n}\n\nimpl Executor {\n fn new(args: Args, spec: Spec) -> Result {\n let mut custom_languages = HashMap::new();\n for lang in spec.custom_languages {\n let separator_regex = lang\n .separators_regex\n .iter()\n .map(|s| Regex::new(s))\n .collect::>()\n .with_context(|| {\n format!(\n \"failed in parsing regexp for language `{}`\",\n lang.language_name\n )\n })?;\n let language_config = Arc::new(SimpleLanguageConfig {\n name: lang.language_name,\n aliases: lang.aliases,\n separator_regex,\n });\n if custom_languages\n .insert(\n UniCase::new(language_config.name.clone()),\n language_config.clone(),\n )\n .is_some()\n {\n api_bail!(\n \"duplicate language name / alias: `{}`\",\n language_config.name\n );\n }\n for alias in &language_config.aliases {\n if custom_languages\n .insert(UniCase::new(alias.clone()), language_config.clone())\n .is_some()\n {\n api_bail!(\"duplicate language name / alias: `{}`\", alias);\n }\n }\n }\n Ok(Self {\n args,\n custom_languages,\n })\n }\n}\n\nfn set_output_positions<'a>(text: &str, positions: impl Iterator) {\n let mut positions = positions.collect::>();\n positions.sort_by_key(|o| o.byte_offset);\n\n let mut positions_iter = positions.iter_mut();\n let Some(mut next_position) = positions_iter.next() else {\n return;\n };\n\n let mut char_offset = 0;\n let mut line = 1;\n let mut column = 1;\n for (byte_offset, ch) in text.char_indices() {\n while next_position.byte_offset == byte_offset {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n char_offset += 1;\n if ch == '\\n' {\n line += 1;\n column = 1;\n } else {\n column += 1;\n }\n }\n\n // Offsets after the last char.\n loop {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = 
position;\n } else {\n return;\n }\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let full_text = self.args.text.value(&input)?.as_str()?;\n let chunk_size = self.args.chunk_size.value(&input)?.as_int64()?;\n let recursive_chunker = RecursiveChunker {\n full_text,\n chunk_size: chunk_size as usize,\n chunk_overlap: (self.args.chunk_overlap.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(0) as usize,\n min_chunk_size: (self.args.min_chunk_size.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(chunk_size / 2) as usize,\n };\n\n let language = UniCase::new(\n (if let Some(language) = self.args.language.value(&input)?.optional() {\n language.as_str()?\n } else {\n \"\"\n })\n .to_string(),\n );\n let mut output = if let Some(lang_config) = self.custom_languages.get(&language) {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id: 0,\n })?\n } else if let Some(lang_config) = TREE_SITTER_LANGUAGE_BY_LANG.get(&language) {\n let mut parser = tree_sitter::Parser::new();\n parser.set_language(&lang_config.tree_sitter_lang)?;\n let tree = parser.parse(full_text.as_ref(), None).ok_or_else(|| {\n anyhow!(\"failed in parsing text in language: {}\", lang_config.name)\n })?;\n recursive_chunker.split_root_chunk(ChunkKind::TreeSitterNode {\n lang_config,\n node: tree.root_node(),\n })?\n } else {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n })?\n };\n\n set_output_positions(\n full_text,\n output.iter_mut().flat_map(|chunk_output| {\n std::iter::once(&mut chunk_output.start_pos)\n .chain(std::iter::once(&mut chunk_output.end_pos))\n }),\n );\n\n let table = output\n .into_iter()\n .map(|chunk_output| {\n let output_start = chunk_output.start_pos.output.unwrap();\n let output_end = 
chunk_output.end_pos.output.unwrap();\n (\n RangeValue::new(output_start.char_offset, output_end.char_offset).into(),\n fields_value!(\n Arc::::from(chunk_output.text),\n output_start.into_output(),\n output_end.into_output()\n )\n .into(),\n )\n })\n .collect();\n\n Ok(Value::KTable(table))\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"SplitRecursively\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n chunk_size: args_resolver\n .next_arg(\"chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n min_chunk_size: args_resolver\n .next_optional_arg(\"min_chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n chunk_overlap: args_resolver\n .next_optional_arg(\"chunk_overlap\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let pos_struct = schema::ValueType::Struct(schema::StructSchema {\n fields: Arc::new(vec![\n schema::FieldSchema::new(\"offset\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"line\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"column\", make_output_type(BasicValueType::Int64)),\n ]),\n description: None,\n });\n\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"location\",\n make_output_type(BasicValueType::Range),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"text\",\n 
make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"start\",\n schema::EnrichedValueType {\n typ: pos_struct.clone(),\n nullable: false,\n attrs: Default::default(),\n },\n ));\n schema_builder.add_field(FieldSchema::new(\n \"end\",\n schema::EnrichedValueType {\n typ: pos_struct,\n nullable: false,\n attrs: Default::default(),\n },\n ));\n let output_schema = make_output_type(TableSchema::new(TableKind::KTable, struct_schema))\n .with_attr(\n field_attrs::CHUNK_BASE_TEXT,\n serde_json::to_value(args_resolver.get_analyze_value(&args.text))?,\n );\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(args, spec)?))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n // Helper function to assert chunk text and its consistency with the range within the original text.\n fn assert_chunk_text_consistency(\n full_text: &str, // Added full text\n actual_chunk: &ChunkOutput<'_>,\n expected_text: &str,\n context: &str,\n ) {\n // Extract text using the chunk's range from the original full text.\n let extracted_text = full_text\n .get(actual_chunk.start_pos.byte_offset..actual_chunk.end_pos.byte_offset)\n .unwrap();\n // Assert that the expected text matches the text provided in the chunk.\n assert_eq!(\n actual_chunk.text, expected_text,\n \"Provided chunk text mismatch - {context}\"\n );\n // Assert that the expected text also matches the text extracted using the chunk's range.\n assert_eq!(\n extracted_text, expected_text,\n \"Range inconsistency: extracted text mismatch - {context}\"\n );\n }\n\n // Creates a default RecursiveChunker for testing, assuming no language-specific parsing.\n fn create_test_chunker(\n text: &str,\n 
chunk_size: usize,\n min_chunk_size: usize,\n chunk_overlap: usize,\n ) -> RecursiveChunker {\n RecursiveChunker {\n full_text: text,\n chunk_size,\n chunk_overlap,\n min_chunk_size,\n }\n }\n\n #[tokio::test]\n async fn test_split_recursively() {\n let spec = Spec {\n custom_languages: vec![],\n };\n let factory = Arc::new(Factory);\n let text_content = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n\n let input_args_values = vec![\n text_content.to_string().into(),\n (15i64).into(),\n (5i64).into(),\n (0i64).into(),\n Value::Null,\n ];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"min_chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"chunk_overlap\", BasicValueType::Int64),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::KTable(table) => {\n let expected_chunks = vec![\n (RangeValue::new(0, 8), \"Linea 1.\"),\n (RangeValue::new(9, 17), \"Linea 2.\"),\n (RangeValue::new(19, 27), \"Linea 3.\"),\n ];\n\n for (range, expected_text) in expected_chunks {\n let key: KeyValue = range.into();\n match table.get(&key) {\n Some(scope_value_ref) => {\n let chunk_text =\n scope_value_ref.0.fields[0].as_str().unwrap_or_else(|_| {\n panic!(\"Chunk text not a string for key {key:?}\")\n });\n assert_eq!(**chunk_text, *expected_text);\n }\n None => panic!(\"Expected row value for key {key:?}, not found\"),\n }\n }\n }\n other => panic!(\"Expected Value::KTable, got {other:?}\"),\n }\n }\n\n #[test]\n fn test_translate_bytes_to_chars_simple() {\n let text = \"abc😄def\";\n let mut start1 = Position::new(0);\n let mut end1 = Position::new(3);\n let mut start2 = Position::new(3);\n let mut end2 = 
Position::new(7);\n let mut start3 = Position::new(7);\n let mut end3 = Position::new(10);\n let mut end_full = Position::new(text.len());\n\n let offsets = vec![\n &mut start1,\n &mut end1,\n &mut start2,\n &mut end2,\n &mut start3,\n &mut end3,\n &mut end_full,\n ];\n\n set_output_positions(text, offsets.into_iter());\n\n assert_eq!(\n start1.output,\n Some(OutputPosition {\n char_offset: 0,\n line: 1,\n column: 1,\n })\n );\n assert_eq!(\n end1.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n start2.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n end2.output,\n Some(OutputPosition {\n char_offset: 4,\n line: 1,\n column: 5,\n })\n );\n assert_eq!(\n end3.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n assert_eq!(\n end_full.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n }\n\n #[test]\n fn test_basic_split_no_overlap() {\n let text = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n let chunker = create_test_chunker(text, 15, 5, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n assert_chunk_text_consistency(text, &chunks[0], \"Linea 1.\", \"Test 1, Chunk 0\");\n assert_chunk_text_consistency(text, &chunks[1], \"Linea 2.\", \"Test 1, Chunk 1\");\n assert_chunk_text_consistency(text, &chunks[2], \"Linea 3.\", \"Test 1, Chunk 2\");\n\n // Test splitting when chunk_size forces breaks within segments.\n let text2 = \"A very very long text that needs to be split.\";\n let chunker2 = create_test_chunker(text2, 20, 12, 0);\n let result2 = chunker2.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result2.is_ok());\n let chunks2 = 
result2.unwrap();\n\n // Expect multiple chunks, likely split by spaces due to chunk_size.\n assert!(chunks2.len() > 1);\n assert_chunk_text_consistency(text2, &chunks2[0], \"A very very long\", \"Test 2, Chunk 0\");\n assert!(chunks2[0].text.len() <= 20);\n }\n\n #[test]\n fn test_basic_split_with_overlap() {\n let text = \"This is a test text that is a bit longer to see how the overlap works.\";\n let chunker = create_test_chunker(text, 20, 10, 5);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert!(chunks.len() > 1);\n\n if chunks.len() >= 2 {\n assert!(chunks[0].text.len() <= 25);\n }\n }\n\n #[test]\n fn test_split_trims_whitespace() {\n let text = \" \\n First chunk. \\n\\n Second chunk with spaces at the end. \\n\";\n let chunker = create_test_chunker(text, 30, 10, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n\n assert_chunk_text_consistency(\n text,\n &chunks[0],\n \" First chunk.\",\n \"Whitespace Test, Chunk 0\",\n );\n assert_chunk_text_consistency(\n text,\n &chunks[1],\n \" Second chunk with spaces\",\n \"Whitespace Test, Chunk 1\",\n );\n assert_chunk_text_consistency(text, &chunks[2], \"at the end.\", \"Whitespace Test, Chunk 2\");\n }\n}\n"], ["/cocoindex/src/setup/states.rs", "/// Concepts:\n/// - Resource: some setup that needs to be tracked and maintained.\n/// - Setup State: current state of a resource.\n/// - Staging Change: states changes that may not be really applied yet.\n/// - Combined Setup State: Setup State + Staging Change.\n/// - Status Check: information about changes that are being applied / need to be applied.\n///\n/// Resource hierarchy:\n/// - [resource: setup metadata table] 
/// - Flow\n/// - [resource: metadata]\n/// - [resource: tracking table]\n/// - Target\n/// - [resource: target-specific stuff]\nuse crate::prelude::*;\n\nuse indenter::indented;\nuse owo_colors::{AnsiColors, OwoColorize};\nuse std::any::Any;\nuse std::fmt::Debug;\nuse std::fmt::{Display, Write};\nuse std::hash::Hash;\n\nuse super::db_metadata;\nuse crate::execution::db_tracking_setup::{\n self, TrackingTableSetupState, TrackingTableSetupStatus,\n};\n\nconst INDENT: &str = \" \";\n\npub trait StateMode: Clone + Copy {\n type State: Debug + Clone;\n type DefaultState: Debug + Clone + Default;\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct DesiredMode;\nimpl StateMode for DesiredMode {\n type State = T;\n type DefaultState = T;\n}\n\n#[derive(Debug, Clone)]\npub struct CombinedState {\n pub current: Option,\n pub staging: Vec>,\n /// Legacy state keys that no longer identical to the latest serialized form (usually caused by code change).\n /// They will be deleted when the next change is applied.\n pub legacy_state_key: Option,\n}\n\nimpl CombinedState {\n pub fn from_desired(desired: T) -> Self {\n Self {\n current: Some(desired),\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn from_change(prev: Option>, change: Option>) -> Self\n where\n T: Clone,\n {\n Self {\n current: match change {\n Some(Some(state)) => Some(state.clone()),\n Some(None) => None,\n None => prev.and_then(|v| v.current),\n },\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn possible_versions(&self) -> impl Iterator {\n self.current\n .iter()\n .chain(self.staging.iter().flat_map(|s| s.state().into_iter()))\n }\n\n pub fn always_exists(&self) -> bool {\n self.current.is_some() && self.staging.iter().all(|s| !s.is_delete())\n }\n\n pub fn legacy_values &V>(\n &self,\n desired: Option<&T>,\n f: F,\n ) -> BTreeSet<&V> {\n let desired_value = desired.map(&f);\n self.possible_versions()\n .map(f)\n .filter(|v| Some(*v) != desired_value)\n .collect()\n }\n}\n\nimpl 
Default for CombinedState {\n fn default() -> Self {\n Self {\n current: None,\n staging: vec![],\n legacy_state_key: None,\n }\n }\n}\n\nimpl PartialEq for CombinedState {\n fn eq(&self, other: &T) -> bool {\n self.staging.is_empty() && self.current.as_ref() == Some(other)\n }\n}\n\n#[derive(Clone, Copy)]\npub struct ExistingMode;\nimpl StateMode for ExistingMode {\n type State = CombinedState;\n type DefaultState = CombinedState;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub enum StateChange {\n Upsert(State),\n Delete,\n}\n\nimpl StateChange {\n pub fn is_delete(&self) -> bool {\n matches!(self, StateChange::Delete)\n }\n\n pub fn desired_state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n\n pub fn state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct SourceSetupState {\n pub source_id: i32,\n pub key_schema: schema::ValueType,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct ResourceIdentifier {\n pub key: serde_json::Value,\n pub target_kind: String,\n}\n\nimpl Display for ResourceIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}:{}\", self.target_kind, self.key)\n }\n}\n\n/// Common state (i.e. 
not specific to a target kind) for a target.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupStateCommon {\n pub target_id: i32,\n pub schema_version_id: i32,\n pub max_schema_version_id: i32,\n #[serde(default)]\n pub setup_by_user: bool,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupState {\n pub common: TargetSetupStateCommon,\n\n pub state: serde_json::Value,\n}\n\nimpl TargetSetupState {\n pub fn state_unless_setup_by_user(self) -> Option {\n (!self.common.setup_by_user).then_some(self.state)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]\npub struct FlowSetupMetadata {\n pub last_source_id: i32,\n pub last_target_id: i32,\n pub sources: BTreeMap,\n}\n\n#[derive(Debug, Clone)]\npub struct FlowSetupState {\n // The version number for the flow, last seen in the metadata table.\n pub seen_flow_metadata_version: Option,\n pub metadata: Mode::DefaultState,\n pub tracking_table: Mode::State,\n pub targets: IndexMap>,\n}\n\nimpl Default for FlowSetupState {\n fn default() -> Self {\n Self {\n seen_flow_metadata_version: None,\n metadata: Default::default(),\n tracking_table: Default::default(),\n targets: IndexMap::new(),\n }\n }\n}\n\nimpl PartialEq for FlowSetupState {\n fn eq(&self, other: &Self) -> bool {\n self.metadata == other.metadata\n && self.tracking_table == other.tracking_table\n && self.targets == other.targets\n }\n}\n\n#[derive(Debug, Clone)]\npub struct AllSetupStates {\n pub has_metadata_table: bool,\n pub flows: BTreeMap>,\n}\n\nimpl Default for AllSetupStates {\n fn default() -> Self {\n Self {\n has_metadata_table: false,\n flows: BTreeMap::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum SetupChangeType {\n NoChange,\n Create,\n Update,\n Delete,\n Invalid,\n}\n\npub enum ChangeDescription {\n Action(String),\n Note(String),\n}\n\npub trait ResourceSetupStatus: Send + Sync + Debug + Any + 
'static {\n fn describe_changes(&self) -> Vec;\n\n fn change_type(&self) -> SetupChangeType;\n}\n\nimpl ResourceSetupStatus for Box {\n fn describe_changes(&self) -> Vec {\n self.as_ref().describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.as_ref().change_type()\n }\n}\n\nimpl ResourceSetupStatus for std::convert::Infallible {\n fn describe_changes(&self) -> Vec {\n unreachable!()\n }\n\n fn change_type(&self) -> SetupChangeType {\n unreachable!()\n }\n}\n\n#[derive(Debug)]\npub struct ResourceSetupInfo {\n pub key: K,\n pub state: Option,\n pub description: String,\n\n /// If `None`, the resource is managed by users.\n pub setup_status: Option,\n\n pub legacy_key: Option,\n}\n\nimpl std::fmt::Display for ResourceSetupInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let status_code = match self.setup_status.as_ref().map(|c| c.change_type()) {\n Some(SetupChangeType::NoChange) => \"READY\",\n Some(SetupChangeType::Create) => \"TO CREATE\",\n Some(SetupChangeType::Update) => \"TO UPDATE\",\n Some(SetupChangeType::Delete) => \"TO DELETE\",\n Some(SetupChangeType::Invalid) => \"INVALID\",\n None => \"USER MANAGED\",\n };\n let status_str = format!(\"[ {status_code:^9} ]\");\n let status_full = status_str.color(AnsiColors::Cyan);\n let desc_colored = &self.description;\n writeln!(f, \"{status_full} {desc_colored}\")?;\n if let Some(setup_status) = &self.setup_status {\n let changes = setup_status.describe_changes();\n if !changes.is_empty() {\n let mut f = indented(f).with_str(INDENT);\n writeln!(f, \"\")?;\n for change in changes {\n match change {\n ChangeDescription::Action(action) => {\n writeln!(\n f,\n \"{} {}\",\n \"TODO:\".color(AnsiColors::BrightBlack).bold(),\n action.color(AnsiColors::BrightBlack)\n )?;\n }\n ChangeDescription::Note(note) => {\n writeln!(\n f,\n \"{} {}\",\n \"NOTE:\".color(AnsiColors::Yellow).bold(),\n note.color(AnsiColors::Yellow)\n )?;\n }\n }\n }\n writeln!(f)?;\n }\n }\n Ok(())\n 
}\n}\n\nimpl ResourceSetupInfo {\n pub fn is_up_to_date(&self) -> bool {\n self.setup_status\n .as_ref()\n .is_none_or(|c| c.change_type() == SetupChangeType::NoChange)\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum ObjectStatus {\n Invalid,\n New,\n Existing,\n Deleted,\n}\n\npub trait ObjectSetupStatus {\n fn status(&self) -> Option;\n fn is_up_to_date(&self) -> bool;\n}\n\n#[derive(Debug)]\npub struct FlowSetupStatus {\n pub status: Option,\n pub seen_flow_metadata_version: Option,\n\n pub metadata_change: Option>,\n\n pub tracking_table:\n Option>,\n pub target_resources:\n Vec>>,\n\n pub unknown_resources: Vec,\n}\n\nimpl ObjectSetupStatus for FlowSetupStatus {\n fn status(&self) -> Option {\n self.status\n }\n\n fn is_up_to_date(&self) -> bool {\n self.metadata_change.is_none()\n && self\n .tracking_table\n .as_ref()\n .is_none_or(|t| t.is_up_to_date())\n && self\n .target_resources\n .iter()\n .all(|target| target.is_up_to_date())\n }\n}\n\n#[derive(Debug)]\npub struct GlobalSetupStatus {\n pub metadata_table: ResourceSetupInfo<(), (), db_metadata::MetadataTableSetup>,\n}\n\nimpl GlobalSetupStatus {\n pub fn from_setup_states(setup_states: &AllSetupStates) -> Self {\n Self {\n metadata_table: db_metadata::MetadataTableSetup {\n metadata_table_missing: !setup_states.has_metadata_table,\n }\n .into_setup_info(),\n }\n }\n\n pub fn is_up_to_date(&self) -> bool {\n self.metadata_table.is_up_to_date()\n }\n}\n\npub struct ObjectSetupStatusCode<'a, Status: ObjectSetupStatus>(&'a Status);\nimpl std::fmt::Display for ObjectSetupStatusCode<'_, Status> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let Some(status) = self.0.status() else {\n return Ok(());\n };\n write!(\n f,\n \"[ {:^9} ]\",\n match status {\n ObjectStatus::New => \"TO CREATE\",\n ObjectStatus::Existing =>\n if self.0.is_up_to_date() {\n \"READY\"\n } else {\n \"TO UPDATE\"\n },\n ObjectStatus::Deleted => \"TO DELETE\",\n 
ObjectStatus::Invalid => \"INVALID\",\n }\n )\n }\n}\n\nimpl std::fmt::Display for GlobalSetupStatus {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n writeln!(f, \"{}\", self.metadata_table)\n }\n}\n\npub struct FormattedFlowSetupStatus<'a>(pub &'a str, pub &'a FlowSetupStatus);\n\nimpl std::fmt::Display for FormattedFlowSetupStatus<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let flow_ssc = self.1;\n if flow_ssc.status.is_none() {\n return Ok(());\n }\n\n writeln!(\n f,\n \"{} Flow: {}\",\n ObjectSetupStatusCode(flow_ssc)\n .to_string()\n .color(AnsiColors::Cyan),\n self.0\n )?;\n\n let mut f = indented(f).with_str(INDENT);\n if let Some(tracking_table) = &flow_ssc.tracking_table {\n write!(f, \"{tracking_table}\")?;\n }\n for target_resource in &flow_ssc.target_resources {\n write!(f, \"{target_resource}\")?;\n }\n for resource in &flow_ssc.unknown_resources {\n writeln!(f, \"[ UNKNOWN ] {resource}\")?;\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/extract_by_llm.rs", "use crate::llm::{\n LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat, new_llm_generation_client,\n};\nuse crate::ops::sdk::*;\nuse crate::prelude::*;\nuse base::json_schema::build_json_schema;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Spec {\n llm_spec: LlmSpec,\n output_type: EnrichedValueType,\n instruction: Option,\n}\n\npub struct Args {\n text: Option,\n image: Option,\n}\n\nstruct Executor {\n args: Args,\n client: Box,\n model: String,\n output_json_schema: SchemaObject,\n system_prompt: String,\n value_extractor: base::json_schema::ValueExtractor,\n}\n\nfn get_system_prompt(instructions: &Option, extra_instructions: Option) -> String {\n let mut message =\n \"You are a helpful assistant that processes user-provided inputs (text, images, or both) to produce structured outputs. 
\\\nYour task is to follow the provided instructions to generate or extract information and output valid JSON matching the specified schema. \\\nBase your response solely on the content of the input. \\\nFor generative tasks, respond accurately and relevantly based on what is provided. \\\nUnless explicitly instructed otherwise, output only the JSON. DO NOT include explanations, descriptions, or formatting outside the JSON.\"\n .to_string();\n\n if let Some(custom_instructions) = instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(custom_instructions);\n }\n\n if let Some(extra_instructions) = extra_instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(&extra_instructions);\n }\n\n message\n}\n\nimpl Executor {\n async fn new(spec: Spec, args: Args) -> Result {\n let client = new_llm_generation_client(\n spec.llm_spec.api_type,\n spec.llm_spec.address,\n spec.llm_spec.api_config,\n )\n .await?;\n let schema_output = build_json_schema(spec.output_type, client.json_schema_options())?;\n Ok(Self {\n args,\n client,\n model: spec.llm_spec.model,\n output_json_schema: schema_output.schema,\n system_prompt: get_system_prompt(&spec.instruction, schema_output.extra_instructions),\n value_extractor: schema_output.value_extractor,\n })\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let image_bytes: Option> = self\n .args\n .image\n .as_ref()\n .map(|arg| arg.value(&input)?.as_bytes())\n .transpose()?\n .map(|bytes| Cow::Borrowed(bytes.as_ref()));\n let text = self\n .args\n .text\n .as_ref()\n .map(|arg| arg.value(&input)?.as_str())\n .transpose()?;\n\n if text.is_none() && image_bytes.is_none() {\n api_bail!(\"At least one of `text` or `image` must be provided\");\n }\n\n let user_prompt = text.map_or(\"\", |v| v);\n let req = LlmGenerateRequest {\n model: &self.model,\n 
system_prompt: Some(Cow::Borrowed(&self.system_prompt)),\n user_prompt: Cow::Borrowed(user_prompt),\n image: image_bytes,\n output_format: Some(OutputFormat::JsonSchema {\n name: Cow::Borrowed(\"ExtractedData\"),\n schema: Cow::Borrowed(&self.output_json_schema),\n }),\n };\n let res = self.client.generate(req).await?;\n let json_value: serde_json::Value = serde_json::from_str(res.text.as_str())?;\n let value = self.value_extractor.extract_value(json_value)?;\n Ok(value)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ExtractByLlm\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_optional_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n image: args_resolver\n .next_optional_arg(\"image\")?\n .expect_type(&ValueType::Basic(BasicValueType::Bytes))?,\n };\n\n if args.text.is_none() && args.image.is_none() {\n api_bail!(\"At least one of 'text' or 'image' must be provided\");\n }\n\n Ok((args, spec.output_type.clone()))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n resolved_input_schema: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec, resolved_input_schema).await?))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires an OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_extract_by_llm() {\n // Define the expected output structure\n let target_output_schema = StructSchema {\n fields: Arc::new(vec![\n FieldSchema::new(\n \"extracted_field_name\",\n make_output_type(BasicValueType::Str),\n ),\n FieldSchema::new(\n 
\"extracted_field_value\",\n make_output_type(BasicValueType::Int64),\n ),\n ]),\n description: Some(\"A test structure for extraction\".into()),\n };\n\n let output_type_spec = EnrichedValueType {\n typ: ValueType::Struct(target_output_schema.clone()),\n nullable: false,\n attrs: Arc::new(BTreeMap::new()),\n };\n\n let spec = Spec {\n llm_spec: LlmSpec {\n api_type: crate::llm::LlmApiType::OpenAi,\n model: \"gpt-4o\".to_string(),\n address: None,\n api_config: None,\n },\n output_type: output_type_spec,\n instruction: Some(\"Extract the name and value from the text. The name is a string, the value is an integer.\".to_string()),\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"The item is called 'CocoIndex Test' and its value is 42.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_extract_by_llm: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Struct(field_values) => {\n assert_eq!(\n field_values.fields.len(),\n target_output_schema.fields.len(),\n \"Mismatched number of fields in output struct\"\n );\n for (idx, field_schema) in target_output_schema.fields.iter().enumerate() {\n match (&field_values.fields[idx], &field_schema.value_type.typ) {\n (\n Value::Basic(BasicValue::Str(_)),\n ValueType::Basic(BasicValueType::Str),\n ) => {}\n (\n Value::Basic(BasicValue::Int64(_)),\n ValueType::Basic(BasicValueType::Int64),\n ) => {}\n (val, expected_type) => panic!(\n \"Field '{}' type mismatch. 
Got {:?}, expected type compatible with {:?}\",\n field_schema.name,\n val.kind(),\n expected_type\n ),\n }\n }\n }\n _ => panic!(\"Expected Value::Struct, got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/base/value.rs", "use super::schema::*;\nuse crate::base::duration::parse_duration;\nuse crate::prelude::invariance_violation;\nuse crate::{api_bail, api_error};\nuse anyhow::Result;\nuse base64::prelude::*;\nuse bytes::Bytes;\nuse chrono::Offset;\nuse log::warn;\nuse serde::{\n Deserialize, Serialize,\n de::{SeqAccess, Visitor},\n ser::{SerializeMap, SerializeSeq, SerializeTuple},\n};\nuse std::{collections::BTreeMap, ops::Deref, sync::Arc};\n\npub trait EstimatedByteSize: Sized {\n fn estimated_detached_byte_size(&self) -> usize;\n\n fn estimated_byte_size(&self) -> usize {\n self.estimated_detached_byte_size() + std::mem::size_of::()\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct RangeValue {\n pub start: usize,\n pub end: usize,\n}\n\nimpl RangeValue {\n pub fn new(start: usize, end: usize) -> Self {\n RangeValue { start, end }\n }\n\n pub fn len(&self) -> usize {\n self.end - self.start\n }\n\n pub fn extract_str<'s>(&self, s: &'s (impl AsRef + ?Sized)) -> &'s str {\n let s = s.as_ref();\n &s[self.start..self.end]\n }\n}\n\nimpl Serialize for RangeValue {\n fn serialize(&self, serializer: S) -> Result {\n let mut tuple = serializer.serialize_tuple(2)?;\n tuple.serialize_element(&self.start)?;\n tuple.serialize_element(&self.end)?;\n tuple.end()\n }\n}\n\nimpl<'de> Deserialize<'de> for RangeValue {\n fn deserialize>(deserializer: D) -> Result {\n struct RangeVisitor;\n\n impl<'de> Visitor<'de> for RangeVisitor {\n type Value = RangeValue;\n\n fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n formatter.write_str(\"a tuple of two u64\")\n }\n\n fn visit_seq(self, mut seq: V) -> Result\n where\n V: SeqAccess<'de>,\n {\n let start = seq\n .next_element()?\n .ok_or_else(|| 
serde::de::Error::missing_field(\"missing begin\"))?;\n let end = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing end\"))?;\n Ok(RangeValue { start, end })\n }\n }\n deserializer.deserialize_tuple(2, RangeVisitor)\n }\n}\n\n/// Value of key.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]\npub enum KeyValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Struct(Vec),\n}\n\nimpl From for KeyValue {\n fn from(value: Bytes) -> Self {\n KeyValue::Bytes(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Arc) -> Self {\n KeyValue::Str(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: String) -> Self {\n KeyValue::Str(Arc::from(value))\n }\n}\n\nimpl From for KeyValue {\n fn from(value: bool) -> Self {\n KeyValue::Bool(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: i64) -> Self {\n KeyValue::Int64(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: RangeValue) -> Self {\n KeyValue::Range(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: uuid::Uuid) -> Self {\n KeyValue::Uuid(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: chrono::NaiveDate) -> Self {\n KeyValue::Date(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Struct(value)\n }\n}\n\nimpl serde::Serialize for KeyValue {\n fn serialize(&self, serializer: S) -> Result {\n Value::from(self.clone()).serialize(serializer)\n }\n}\n\nimpl std::fmt::Display for KeyValue {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n KeyValue::Bytes(v) => write!(f, \"{}\", BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => write!(f, \"\\\"{}\\\"\", v.escape_default()),\n KeyValue::Bool(v) => write!(f, \"{v}\"),\n KeyValue::Int64(v) => write!(f, \"{v}\"),\n 
KeyValue::Range(v) => write!(f, \"[{}, {})\", v.start, v.end),\n KeyValue::Uuid(v) => write!(f, \"{v}\"),\n KeyValue::Date(v) => write!(f, \"{v}\"),\n KeyValue::Struct(v) => {\n write!(\n f,\n \"[{}]\",\n v.iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\", \")\n )\n }\n }\n }\n}\n\nimpl KeyValue {\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n let value = if fields_schema.len() == 1 {\n Value::from_json(value, &fields_schema[0].value_type.typ)?\n } else {\n let field_values: FieldValues = FieldValues::from_json(value, fields_schema)?;\n Value::Struct(field_values)\n };\n value.as_key()\n }\n\n pub fn from_values<'a>(values: impl ExactSizeIterator) -> Result {\n let key = if values.len() == 1 {\n let mut values = values;\n values.next().ok_or_else(invariance_violation)?.as_key()?\n } else {\n KeyValue::Struct(values.map(|v| v.as_key()).collect::>>()?)\n };\n Ok(key)\n }\n\n pub fn fields_iter(&self, num_fields: usize) -> Result> {\n let slice = if num_fields == 1 {\n std::slice::from_ref(self)\n } else {\n match self {\n KeyValue::Struct(v) => v,\n _ => api_bail!(\"Invalid key value type\"),\n }\n };\n Ok(slice.iter())\n }\n\n fn parts_from_str(\n values_iter: &mut impl Iterator,\n schema: &ValueType,\n ) -> Result {\n let result = match schema {\n ValueType::Basic(basic_type) => {\n let v = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n match basic_type {\n BasicValueType::Bytes => {\n KeyValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n BasicValueType::Str => KeyValue::Str(Arc::from(v)),\n BasicValueType::Bool => KeyValue::Bool(v.parse()?),\n BasicValueType::Int64 => KeyValue::Int64(v.parse()?),\n BasicValueType::Range => {\n let v2 = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n KeyValue::Range(RangeValue {\n start: v.parse()?,\n end: v2.parse()?,\n })\n }\n BasicValueType::Uuid => KeyValue::Uuid(v.parse()?),\n 
BasicValueType::Date => KeyValue::Date(v.parse()?),\n schema => api_bail!(\"Invalid key type {schema}\"),\n }\n }\n ValueType::Struct(s) => KeyValue::Struct(\n s.fields\n .iter()\n .map(|f| KeyValue::parts_from_str(values_iter, &f.value_type.typ))\n .collect::>>()?,\n ),\n _ => api_bail!(\"Invalid key type {schema}\"),\n };\n Ok(result)\n }\n\n fn parts_to_strs(&self, output: &mut Vec) {\n match self {\n KeyValue::Bytes(v) => output.push(BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => output.push(v.to_string()),\n KeyValue::Bool(v) => output.push(v.to_string()),\n KeyValue::Int64(v) => output.push(v.to_string()),\n KeyValue::Range(v) => {\n output.push(v.start.to_string());\n output.push(v.end.to_string());\n }\n KeyValue::Uuid(v) => output.push(v.to_string()),\n KeyValue::Date(v) => output.push(v.to_string()),\n KeyValue::Struct(v) => {\n for part in v {\n part.parts_to_strs(output);\n }\n }\n }\n }\n\n pub fn from_strs(value: impl IntoIterator, schema: &ValueType) -> Result {\n let mut values_iter = value.into_iter();\n let result = Self::parts_from_str(&mut values_iter, schema)?;\n if values_iter.next().is_some() {\n api_bail!(\"Key parts more than expected\");\n }\n Ok(result)\n }\n\n pub fn to_strs(&self) -> Vec {\n let mut output = Vec::with_capacity(self.num_parts());\n self.parts_to_strs(&mut output);\n output\n }\n\n pub fn kind_str(&self) -> &'static str {\n match self {\n KeyValue::Bytes(_) => \"bytes\",\n KeyValue::Str(_) => \"str\",\n KeyValue::Bool(_) => \"bool\",\n KeyValue::Int64(_) => \"int64\",\n KeyValue::Range { .. 
} => \"range\",\n KeyValue::Uuid(_) => \"uuid\",\n KeyValue::Date(_) => \"date\",\n KeyValue::Struct(_) => \"struct\",\n }\n }\n\n pub fn bytes_value(&self) -> Result<&Bytes> {\n match self {\n KeyValue::Bytes(v) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn str_value(&self) -> Result<&Arc> {\n match self {\n KeyValue::Str(v) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn bool_value(&self) -> Result {\n match self {\n KeyValue::Bool(v) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn int64_value(&self) -> Result {\n match self {\n KeyValue::Int64(v) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn range_value(&self) -> Result {\n match self {\n KeyValue::Range(v) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn uuid_value(&self) -> Result {\n match self {\n KeyValue::Uuid(v) => Ok(*v),\n _ => anyhow::bail!(\"expected uuid value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn date_value(&self) -> Result {\n match self {\n KeyValue::Date(v) => Ok(*v),\n _ => anyhow::bail!(\"expected date value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn struct_value(&self) -> Result<&Vec> {\n match self {\n KeyValue::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn num_parts(&self) -> usize {\n match self {\n KeyValue::Range(_) => 2,\n KeyValue::Struct(v) => v.iter().map(|v| v.num_parts()).sum(),\n _ => 1,\n }\n }\n\n fn estimated_detached_byte_size(&self) -> usize {\n match self {\n KeyValue::Bytes(v) => v.len(),\n KeyValue::Str(v) => v.len(),\n KeyValue::Struct(v) => {\n v.iter()\n .map(KeyValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n KeyValue::Bool(_)\n | 
KeyValue::Int64(_)\n | KeyValue::Range(_)\n | KeyValue::Uuid(_)\n | KeyValue::Date(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub enum BasicValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Float32(f32),\n Float64(f64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Time(chrono::NaiveTime),\n LocalDateTime(chrono::NaiveDateTime),\n OffsetDateTime(chrono::DateTime),\n TimeDelta(chrono::Duration),\n Json(Arc),\n Vector(Arc<[BasicValue]>),\n UnionVariant {\n tag_id: usize,\n value: Box,\n },\n}\n\nimpl From for BasicValue {\n fn from(value: Bytes) -> Self {\n BasicValue::Bytes(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Arc) -> Self {\n BasicValue::Str(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: String) -> Self {\n BasicValue::Str(Arc::from(value))\n }\n}\n\nimpl From for BasicValue {\n fn from(value: bool) -> Self {\n BasicValue::Bool(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: i64) -> Self {\n BasicValue::Int64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f32) -> Self {\n BasicValue::Float32(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f64) -> Self {\n BasicValue::Float64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: uuid::Uuid) -> Self {\n BasicValue::Uuid(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDate) -> Self {\n BasicValue::Date(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveTime) -> Self {\n BasicValue::Time(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDateTime) -> Self {\n BasicValue::LocalDateTime(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: chrono::DateTime) -> Self {\n BasicValue::OffsetDateTime(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::Duration) -> Self 
{\n BasicValue::TimeDelta(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: serde_json::Value) -> Self {\n BasicValue::Json(Arc::from(value))\n }\n}\n\nimpl> From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Vector(Arc::from(\n value.into_iter().map(|v| v.into()).collect::>(),\n ))\n }\n}\n\nimpl BasicValue {\n pub fn into_key(self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v),\n BasicValue::Str(v) => KeyValue::Str(v),\n BasicValue::Bool(v) => KeyValue::Bool(v),\n BasicValue::Int64(v) => KeyValue::Int64(v),\n BasicValue::Range(v) => KeyValue::Range(v),\n BasicValue::Uuid(v) => KeyValue::Uuid(v),\n BasicValue::Date(v) => KeyValue::Date(v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. } => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v.clone()),\n BasicValue::Str(v) => KeyValue::Str(v.clone()),\n BasicValue::Bool(v) => KeyValue::Bool(*v),\n BasicValue::Int64(v) => KeyValue::Int64(*v),\n BasicValue::Range(v) => KeyValue::Range(*v),\n BasicValue::Uuid(v) => KeyValue::Uuid(*v),\n BasicValue::Date(v) => KeyValue::Date(*v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. 
} => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match &self {\n BasicValue::Bytes(_) => \"bytes\",\n BasicValue::Str(_) => \"str\",\n BasicValue::Bool(_) => \"bool\",\n BasicValue::Int64(_) => \"int64\",\n BasicValue::Float32(_) => \"float32\",\n BasicValue::Float64(_) => \"float64\",\n BasicValue::Range(_) => \"range\",\n BasicValue::Uuid(_) => \"uuid\",\n BasicValue::Date(_) => \"date\",\n BasicValue::Time(_) => \"time\",\n BasicValue::LocalDateTime(_) => \"local_datetime\",\n BasicValue::OffsetDateTime(_) => \"offset_datetime\",\n BasicValue::TimeDelta(_) => \"timedelta\",\n BasicValue::Json(_) => \"json\",\n BasicValue::Vector(_) => \"vector\",\n BasicValue::UnionVariant { .. } => \"union\",\n }\n }\n\n /// Returns the estimated byte size of the value, for detached data (i.e. allocated on heap).\n fn estimated_detached_byte_size(&self) -> usize {\n fn json_estimated_detached_byte_size(val: &serde_json::Value) -> usize {\n match val {\n serde_json::Value::String(s) => s.len(),\n serde_json::Value::Array(arr) => {\n arr.iter()\n .map(json_estimated_detached_byte_size)\n .sum::()\n + arr.len() * std::mem::size_of::()\n }\n serde_json::Value::Object(map) => map\n .iter()\n .map(|(k, v)| {\n std::mem::size_of::()\n + k.len()\n + json_estimated_detached_byte_size(v)\n })\n .sum(),\n serde_json::Value::Null\n | serde_json::Value::Bool(_)\n | serde_json::Value::Number(_) => 0,\n }\n }\n match self {\n BasicValue::Bytes(v) => v.len(),\n BasicValue::Str(v) => v.len(),\n BasicValue::Json(v) => json_estimated_detached_byte_size(v),\n BasicValue::Vector(v) => {\n v.iter()\n .map(BasicValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n BasicValue::UnionVariant { value, .. 
} => {\n value.estimated_detached_byte_size() + std::mem::size_of::()\n }\n BasicValue::Bool(_)\n | BasicValue::Int64(_)\n | BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Range(_)\n | BasicValue::Uuid(_)\n | BasicValue::Date(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Deserialize)]\npub enum Value {\n #[default]\n Null,\n Basic(BasicValue),\n Struct(FieldValues),\n UTable(Vec),\n KTable(BTreeMap),\n LTable(Vec),\n}\n\nimpl> From for Value {\n fn from(value: T) -> Self {\n Value::Basic(value.into())\n }\n}\n\nimpl From for Value {\n fn from(value: KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v)),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v)),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.into_iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From<&KeyValue> for Value {\n fn from(value: &KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v.clone())),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v.clone())),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(*v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(*v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(*v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(*v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(*v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From for Value {\n fn from(value: FieldValues) -> Self {\n 
Value::Struct(value)\n }\n}\n\nimpl> From> for Value {\n fn from(value: Option) -> Self {\n match value {\n Some(v) => v.into(),\n None => Value::Null,\n }\n }\n}\n\nimpl Value {\n pub fn from_alternative(value: Value) -> Self\n where\n AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .into_iter()\n .map(|v| Value::::from_alternative(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.into_iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.into_iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.into_iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn from_alternative_ref(value: &Value) -> Self\n where\n for<'a> &'a AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v.clone()),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .iter()\n .map(|v| Value::::from_alternative_ref(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn is_null(&self) -> bool {\n matches!(self, Value::Null)\n }\n\n pub fn into_key(self) -> Result {\n let result = match self {\n Value::Basic(v) => v.into_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .into_iter()\n .map(|v| v.into_key())\n .collect::>>()?,\n ),\n Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n Value::Basic(v) => v.as_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .iter()\n .map(|v| v.as_key())\n .collect::>>()?,\n ),\n 
Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match self {\n Value::Null => \"null\",\n Value::Basic(v) => v.kind(),\n Value::Struct(_) => \"Struct\",\n Value::UTable(_) => \"UTable\",\n Value::KTable(_) => \"KTable\",\n Value::LTable(_) => \"LTable\",\n }\n }\n\n pub fn optional(&self) -> Option<&Self> {\n match self {\n Value::Null => None,\n _ => Some(self),\n }\n }\n\n pub fn as_bytes(&self) -> Result<&Bytes> {\n match self {\n Value::Basic(BasicValue::Bytes(v)) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_str(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Str(v)) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_bool(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Bool(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_int64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Int64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float32(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float32(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float32 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_range(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Range(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_json(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Json(v)) => Ok(v),\n _ => anyhow::bail!(\"expected json value, but got {}\", self.kind()),\n }\n }\n\n 
pub fn as_vector(&self) -> Result<&Arc<[BasicValue]>> {\n match self {\n Value::Basic(BasicValue::Vector(v)) => Ok(v),\n _ => anyhow::bail!(\"expected vector value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_struct(&self) -> Result<&FieldValues> {\n match self {\n Value::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind()),\n }\n }\n}\n\nimpl Value {\n pub fn estimated_byte_size(&self) -> usize {\n std::mem::size_of::()\n + match self {\n Value::Null => 0,\n Value::Basic(v) => v.estimated_detached_byte_size(),\n Value::Struct(v) => v.estimated_detached_byte_size(),\n Value::UTable(v) | Value::LTable(v) => {\n v.iter()\n .map(|v| v.estimated_detached_byte_size())\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n Value::KTable(v) => {\n v.iter()\n .map(|(k, v)| {\n k.estimated_detached_byte_size() + v.estimated_detached_byte_size()\n })\n .sum::()\n + v.len() * std::mem::size_of::<(String, ScopeValue)>()\n }\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub struct FieldValues {\n pub fields: Vec>,\n}\n\nimpl EstimatedByteSize for FieldValues {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(Value::::estimated_byte_size)\n .sum::()\n + self.fields.len() * std::mem::size_of::>()\n }\n}\n\nimpl serde::Serialize for FieldValues {\n fn serialize(&self, serializer: S) -> Result {\n self.fields.serialize(serializer)\n }\n}\n\nimpl FieldValues\nwhere\n FieldValues: Into,\n{\n pub fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize(num_fields, Value::::Null);\n Self { fields }\n }\n\n fn from_json_values<'a>(\n fields: impl Iterator,\n ) -> Result {\n Ok(Self {\n fields: fields\n .map(|(s, v)| {\n let value = Value::::from_json(v, &s.value_type.typ)?;\n if value.is_null() && !s.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", s.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n fn 
from_json_object<'a>(\n values: serde_json::Map,\n fields_schema: impl Iterator,\n ) -> Result {\n let mut values = values;\n Ok(Self {\n fields: fields_schema\n .map(|field| {\n let value = match values.get_mut(&field.name) {\n Some(v) => {\n Value::::from_json(std::mem::take(v), &field.value_type.typ)?\n }\n None => Value::::default(),\n };\n if value.is_null() && !field.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", field.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n match value {\n serde_json::Value::Array(v) => {\n if v.len() != fields_schema.len() {\n api_bail!(\"unmatched value length\");\n }\n Self::from_json_values(fields_schema.iter().zip(v))\n }\n serde_json::Value::Object(v) => Self::from_json_object(v, fields_schema.iter()),\n _ => api_bail!(\"invalid value type\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct ScopeValue(pub FieldValues);\n\nimpl EstimatedByteSize for ScopeValue {\n fn estimated_detached_byte_size(&self) -> usize {\n self.0.estimated_detached_byte_size()\n }\n}\n\nimpl Deref for ScopeValue {\n type Target = FieldValues;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl From for ScopeValue {\n fn from(value: FieldValues) -> Self {\n Self(value)\n }\n}\n\nimpl serde::Serialize for BasicValue {\n fn serialize(&self, serializer: S) -> Result {\n match self {\n BasicValue::Bytes(v) => serializer.serialize_str(&BASE64_STANDARD.encode(v)),\n BasicValue::Str(v) => serializer.serialize_str(v),\n BasicValue::Bool(v) => serializer.serialize_bool(*v),\n BasicValue::Int64(v) => serializer.serialize_i64(*v),\n BasicValue::Float32(v) => serializer.serialize_f32(*v),\n BasicValue::Float64(v) => serializer.serialize_f64(*v),\n BasicValue::Range(v) => v.serialize(serializer),\n BasicValue::Uuid(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Date(v) => 
serializer.serialize_str(&v.to_string()),\n BasicValue::Time(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::LocalDateTime(v) => {\n serializer.serialize_str(&v.format(\"%Y-%m-%dT%H:%M:%S%.6f\").to_string())\n }\n BasicValue::OffsetDateTime(v) => {\n serializer.serialize_str(&v.to_rfc3339_opts(chrono::SecondsFormat::AutoSi, true))\n }\n BasicValue::TimeDelta(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Json(v) => v.serialize(serializer),\n BasicValue::Vector(v) => v.serialize(serializer),\n BasicValue::UnionVariant { tag_id, value } => {\n let mut s = serializer.serialize_tuple(2)?;\n s.serialize_element(tag_id)?;\n s.serialize_element(value)?;\n s.end()\n }\n }\n }\n}\n\nimpl BasicValue {\n pub fn from_json(value: serde_json::Value, schema: &BasicValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::String(v), BasicValueType::Bytes) => {\n BasicValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n (serde_json::Value::String(v), BasicValueType::Str) => BasicValue::Str(Arc::from(v)),\n (serde_json::Value::Bool(v), BasicValueType::Bool) => BasicValue::Bool(v),\n (serde_json::Value::Number(v), BasicValueType::Int64) => BasicValue::Int64(\n v.as_i64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid int64 value {v}\"))?,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float32) => BasicValue::Float32(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp32 value {v}\"))?\n as f32,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float64) => BasicValue::Float64(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp64 value {v}\"))?,\n ),\n (v, BasicValueType::Range) => BasicValue::Range(serde_json::from_value(v)?),\n (serde_json::Value::String(v), BasicValueType::Uuid) => BasicValue::Uuid(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Date) => BasicValue::Date(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Time) => BasicValue::Time(v.parse()?),\n 
(serde_json::Value::String(v), BasicValueType::LocalDateTime) => {\n BasicValue::LocalDateTime(v.parse()?)\n }\n (serde_json::Value::String(v), BasicValueType::OffsetDateTime) => {\n match chrono::DateTime::parse_from_rfc3339(&v) {\n Ok(dt) => BasicValue::OffsetDateTime(dt),\n Err(e) => {\n if let Ok(dt) = v.parse::() {\n warn!(\"Datetime without timezone offset, assuming UTC\");\n BasicValue::OffsetDateTime(chrono::DateTime::from_naive_utc_and_offset(\n dt,\n chrono::Utc.fix(),\n ))\n } else {\n Err(e)?\n }\n }\n }\n }\n (serde_json::Value::String(v), BasicValueType::TimeDelta) => {\n BasicValue::TimeDelta(parse_duration(&v)?)\n }\n (v, BasicValueType::Json) => BasicValue::Json(Arc::from(v)),\n (\n serde_json::Value::Array(v),\n BasicValueType::Vector(VectorTypeSchema { element_type, .. }),\n ) => {\n let vec = v\n .into_iter()\n .map(|v| BasicValue::from_json(v, element_type))\n .collect::>>()?;\n BasicValue::Vector(Arc::from(vec))\n }\n (v, BasicValueType::Union(typ)) => {\n let arr = match v {\n serde_json::Value::Array(arr) => arr,\n _ => anyhow::bail!(\"Invalid JSON value for union, expect array\"),\n };\n\n if arr.len() != 2 {\n anyhow::bail!(\n \"Invalid union tuple: expect 2 values, received {}\",\n arr.len()\n );\n }\n\n let mut obj_iter = arr.into_iter();\n\n // Take first element\n let tag_id = obj_iter\n .next()\n .and_then(|value| value.as_u64().map(|num_u64| num_u64 as usize))\n .unwrap();\n\n // Take second element\n let value = obj_iter.next().unwrap();\n\n let cur_type = typ\n .types\n .get(tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"No type in `tag_id` \\\"{tag_id}\\\" found\"))?;\n\n BasicValue::UnionVariant {\n tag_id,\n value: Box::new(BasicValue::from_json(value, cur_type)?),\n }\n }\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\nstruct TableEntry<'a>(&'a KeyValue, &'a ScopeValue);\n\nimpl serde::Serialize for Value {\n fn serialize(&self, serializer: S) 
-> Result {\n match self {\n Value::Null => serializer.serialize_none(),\n Value::Basic(v) => v.serialize(serializer),\n Value::Struct(v) => v.serialize(serializer),\n Value::UTable(v) => v.serialize(serializer),\n Value::KTable(m) => {\n let mut seq = serializer.serialize_seq(Some(m.len()))?;\n for (k, v) in m.iter() {\n seq.serialize_element(&TableEntry(k, v))?;\n }\n seq.end()\n }\n Value::LTable(v) => v.serialize(serializer),\n }\n }\n}\n\nimpl serde::Serialize for TableEntry<'_> {\n fn serialize(&self, serializer: S) -> Result {\n let &TableEntry(key, value) = self;\n let mut seq = serializer.serialize_seq(Some(value.0.fields.len() + 1))?;\n seq.serialize_element(key)?;\n for item in value.0.fields.iter() {\n seq.serialize_element(item)?;\n }\n seq.end()\n }\n}\n\nimpl Value\nwhere\n FieldValues: Into,\n{\n pub fn from_json(value: serde_json::Value, schema: &ValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::Null, _) => Value::::Null,\n (v, ValueType::Basic(t)) => Value::::Basic(BasicValue::from_json(v, t)?),\n (v, ValueType::Struct(s)) => {\n Value::::Struct(FieldValues::::from_json(v, &s.fields)?)\n }\n (serde_json::Value::Array(v), ValueType::Table(s)) => match s.kind {\n TableKind::UTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n TableKind::KTable => {\n let rows = v\n .into_iter()\n .map(|v| {\n let mut fields_iter = s.row.fields.iter();\n let key_field = fields_iter\n .next()\n .ok_or_else(|| api_error!(\"Empty struct field values\"))?;\n\n match v {\n serde_json::Value::Array(v) => {\n let mut field_vals_iter = v.into_iter();\n let key = Self::from_json(\n field_vals_iter.next().ok_or_else(|| {\n api_error!(\"Empty struct field values\")\n })?,\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_values(\n fields_iter.zip(field_vals_iter),\n )?;\n Ok((key, values.into()))\n }\n 
serde_json::Value::Object(mut v) => {\n let key = Self::from_json(\n std::mem::take(v.get_mut(&key_field.name).ok_or_else(\n || {\n api_error!(\n \"key field `{}` doesn't exist in value\",\n key_field.name\n )\n },\n )?),\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_object(v, fields_iter)?;\n Ok((key, values.into()))\n }\n _ => api_bail!(\"Table value must be a JSON array or object\"),\n }\n })\n .collect::>>()?;\n Value::KTable(rows)\n }\n TableKind::LTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n },\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct TypedValue<'a> {\n pub t: &'a ValueType,\n pub v: &'a Value,\n}\n\nimpl Serialize for TypedValue<'_> {\n fn serialize(&self, serializer: S) -> Result {\n match (self.t, self.v) {\n (_, Value::Null) => serializer.serialize_none(),\n (ValueType::Basic(t), v) => match t {\n BasicValueType::Union(_) => match v {\n Value::Basic(BasicValue::UnionVariant { value, .. 
}) => {\n value.serialize(serializer)\n }\n _ => Err(serde::ser::Error::custom(\n \"Unmatched union type and value for `TypedValue`\",\n )),\n },\n _ => v.serialize(serializer),\n },\n (ValueType::Struct(s), Value::Struct(field_values)) => TypedFieldsValue {\n schema: &s.fields,\n values_iter: field_values.fields.iter(),\n }\n .serialize(serializer),\n (ValueType::Table(c), Value::UTable(rows) | Value::LTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for row in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: row.fields.iter(),\n })?;\n }\n seq.end()\n }\n (ValueType::Table(c), Value::KTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for (k, v) in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: std::iter::once(&Value::from(k.clone()))\n .chain(v.fields.iter()),\n })?;\n }\n seq.end()\n }\n _ => Err(serde::ser::Error::custom(format!(\n \"Incompatible value type: {:?} {:?}\",\n self.t, self.v\n ))),\n }\n }\n}\n\npub struct TypedFieldsValue<'a, I: Iterator + Clone> {\n pub schema: &'a [FieldSchema],\n pub values_iter: I,\n}\n\nimpl<'a, I: Iterator + Clone> Serialize for TypedFieldsValue<'a, I> {\n fn serialize(&self, serializer: S) -> Result {\n let mut map = serializer.serialize_map(Some(self.schema.len()))?;\n let values_iter = self.values_iter.clone();\n for (field, value) in self.schema.iter().zip(values_iter) {\n map.serialize_entry(\n &field.name,\n &TypedValue {\n t: &field.value_type.typ,\n v: value,\n },\n )?;\n }\n map.end()\n }\n}\n\npub mod test_util {\n use super::*;\n\n pub fn seder_roundtrip(value: &Value, typ: &ValueType) -> Result {\n let json_value = serde_json::to_value(value)?;\n let roundtrip_value = Value::from_json(json_value, typ)?;\n Ok(roundtrip_value)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use std::collections::BTreeMap;\n\n #[test]\n fn test_estimated_byte_size_null() {\n let 
value = Value::::Null;\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_primitive() {\n // Test primitives that should have 0 detached byte size\n let value = Value::::Basic(BasicValue::Bool(true));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Int64(42));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Float64(3.14));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_string() {\n let test_str = \"hello world\";\n let value = Value::::Basic(BasicValue::Str(Arc::from(test_str)));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_str.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_bytes() {\n let test_bytes = b\"hello world\";\n let value = Value::::Basic(BasicValue::Bytes(Bytes::from(test_bytes.to_vec())));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_bytes.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_json() {\n let json_val = serde_json::json!({\"key\": \"value\", \"number\": 42});\n let value = Value::::Basic(BasicValue::Json(Arc::from(json_val)));\n let size = value.estimated_byte_size();\n\n // Should include the size of the JSON structure\n // The exact size depends on the internal JSON representation\n assert!(size > std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_vector() {\n let vec_elements = vec![\n BasicValue::Str(Arc::from(\"hello\")),\n BasicValue::Str(Arc::from(\"world\")),\n BasicValue::Int64(42),\n ];\n let value = Value::::Basic(BasicValue::Vector(Arc::from(vec_elements)));\n let size = 
value.estimated_byte_size();\n\n // Should include the size of the vector elements\n let expected_min_size = std::mem::size_of::>()\n + \"hello\".len()\n + \"world\".len()\n + 3 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_struct() {\n let fields = vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"test\"))),\n Value::::Basic(BasicValue::Int64(123)),\n ];\n let field_values = FieldValues { fields };\n let value = Value::::Struct(field_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"test\".len()\n + 2 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_utable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item2\",\n )))],\n }),\n ];\n let value = Value::::UTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"item1\".len()\n + \"item2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ltable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list2\",\n )))],\n }),\n ];\n let value = Value::::LTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"list1\".len()\n + \"list2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ktable() {\n let mut map = BTreeMap::new();\n map.insert(\n KeyValue::Str(Arc::from(\"key1\")),\n ScopeValue(FieldValues {\n fields: 
vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value1\",\n )))],\n }),\n );\n map.insert(\n KeyValue::Str(Arc::from(\"key2\")),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value2\",\n )))],\n }),\n );\n let value = Value::::KTable(map);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"key1\".len()\n + \"key2\".len()\n + \"value1\".len()\n + \"value2\".len()\n + 2 * std::mem::size_of::<(String, ScopeValue)>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_nested_struct() {\n let inner_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"inner\"))),\n Value::::Basic(BasicValue::Int64(456)),\n ],\n });\n\n let outer_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"outer\"))),\n inner_struct,\n ],\n });\n\n let size = outer_struct.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"outer\".len()\n + \"inner\".len()\n + 4 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_empty_collections() {\n // Empty UTable\n let value = Value::::UTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty LTable\n let value = Value::::LTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty KTable\n let value = Value::::KTable(BTreeMap::new());\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty Struct\n let value = Value::::Struct(FieldValues { fields: vec![] });\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n}\n"], ["/cocoindex/src/execution/db_tracking.rs", "use crate::prelude::*;\n\nuse super::{db_tracking_setup::TrackingTableSetupState, 
memoization::StoredMemoizationInfo};\nuse crate::utils::{db::WriteAction, fingerprint::Fingerprint};\nuse futures::Stream;\nuse serde::de::{self, Deserializer, SeqAccess, Visitor};\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::fmt;\n\n#[derive(Debug, Clone)]\npub struct TrackedTargetKeyInfo {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n pub process_ordinal: i64,\n pub fingerprint: Option,\n}\n\nimpl Serialize for TrackedTargetKeyInfo {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(None)?;\n seq.serialize_element(&self.key)?;\n seq.serialize_element(&self.process_ordinal)?;\n seq.serialize_element(&self.fingerprint)?;\n if !self.additional_key.is_null() {\n seq.serialize_element(&self.additional_key)?;\n }\n seq.end()\n }\n}\n\nimpl<'de> serde::Deserialize<'de> for TrackedTargetKeyInfo {\n fn deserialize(deserializer: D) -> Result\n where\n D: Deserializer<'de>,\n {\n struct TrackedTargetKeyVisitor;\n\n impl<'de> Visitor<'de> for TrackedTargetKeyVisitor {\n type Value = TrackedTargetKeyInfo;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"a sequence of 3 or 4 elements for TrackedTargetKey\")\n }\n\n fn visit_seq(self, mut seq: A) -> Result\n where\n A: SeqAccess<'de>,\n {\n let target_key: serde_json::Value = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n let process_ordinal: i64 = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(1, &self))?;\n let fingerprint: Option = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(2, &self))?;\n let additional_key: Option = seq.next_element()?;\n\n Ok(TrackedTargetKeyInfo {\n key: target_key,\n process_ordinal,\n fingerprint,\n additional_key: additional_key.unwrap_or(serde_json::Value::Null),\n })\n }\n }\n\n deserializer.deserialize_seq(TrackedTargetKeyVisitor)\n }\n}\n\n/// (source_id, 
target_key)\npub type TrackedTargetKeyForSource = Vec<(i32, Vec)>;\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForProcessing {\n pub memoization_info: Option>>,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub max_process_ordinal: Option,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_processing(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT memoization_info, processed_source_ordinal, process_logic_fingerprint, max_process_ordinal, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n\n Ok(tracking_info)\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForPrecommit {\n pub max_process_ordinal: i64,\n pub staging_target_keys: sqlx::types::Json,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_ordinal: Option,\n pub target_keys: Option>,\n}\n\npub async fn read_source_tracking_info_for_precommit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT max_process_ordinal, staging_target_keys, processed_source_ordinal, process_logic_fingerprint, process_ordinal, target_keys FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let precommit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n\n Ok(precommit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: 
&serde_json::Value,\n max_process_ordinal: i64,\n staging_target_keys: TrackedTargetKeyForSource,\n memoization_info: Option<&StoredMemoizationInfo>,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} (source_id, source_key, max_process_ordinal, staging_target_keys, memoization_info) VALUES ($1, $2, $3, $4, $5)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET max_process_ordinal = $3, staging_target_keys = $4, memoization_info = $5 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(max_process_ordinal) // $3\n .bind(sqlx::types::Json(staging_target_keys)) // $4\n .bind(memoization_info.map(sqlx::types::Json)) // $5\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForCommit {\n pub staging_target_keys: sqlx::types::Json,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_commit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT staging_target_keys, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let commit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n Ok(commit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n staging_target_keys: TrackedTargetKeyForSource,\n processed_source_ordinal: Option,\n logic_fingerprint: &[u8],\n process_ordinal: i64,\n 
process_time_micros: i64,\n target_keys: TrackedTargetKeyForSource,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} ( \\\n source_id, source_key, \\\n max_process_ordinal, staging_target_keys, \\\n processed_source_ordinal, process_logic_fingerprint, process_ordinal, process_time_micros, target_keys) \\\n VALUES ($1, $2, $6 + 1, $3, $4, $5, $6, $7, $8)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET staging_target_keys = $3, processed_source_ordinal = $4, process_logic_fingerprint = $5, process_ordinal = $6, process_time_micros = $7, target_keys = $8 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(sqlx::types::Json(staging_target_keys)) // $3\n .bind(processed_source_ordinal) // $4\n .bind(logic_fingerprint) // $5\n .bind(process_ordinal) // $6\n .bind(process_time_micros) // $7\n .bind(sqlx::types::Json(target_keys)) // $8\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub async fn delete_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct TrackedSourceKeyMetadata {\n pub source_key: serde_json::Value,\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n}\n\npub struct ListTrackedSourceKeyMetadataState {\n query_str: String,\n}\n\nimpl ListTrackedSourceKeyMetadataState {\n pub 
fn new() -> Self {\n Self {\n query_str: String::new(),\n }\n }\n\n pub fn list<'a>(\n &'a mut self,\n source_id: i32,\n db_setup: &'a TrackingTableSetupState,\n pool: &'a PgPool,\n ) -> impl Stream> + 'a {\n self.query_str = format!(\n \"SELECT source_key, processed_source_ordinal, process_logic_fingerprint FROM {} WHERE source_id = $1\",\n db_setup.table_name\n );\n sqlx::query_as(&self.query_str).bind(source_id).fetch(pool)\n }\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceLastProcessedInfo {\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_time_micros: Option,\n}\n\npub async fn read_source_last_processed_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT processed_source_ordinal, process_logic_fingerprint, process_time_micros FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let last_processed_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n Ok(last_processed_info)\n}\n\npub async fn update_source_tracking_ordinal(\n source_id: i32,\n source_key_json: &serde_json::Value,\n processed_source_ordinal: Option,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"UPDATE {} SET processed_source_ordinal = $3 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(processed_source_ordinal) // $3\n .execute(db_executor)\n .await?;\n Ok(())\n}\n"], ["/cocoindex/src/ops/sources/local_file.rs", "use async_stream::try_stream;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse log::warn;\nuse std::borrow::Cow;\nuse std::path::Path;\nuse std::{path::PathBuf, sync::Arc};\n\nuse 
crate::base::field_attrs;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n path: String,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n}\n\nstruct Executor {\n root_path: PathBuf,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, path: impl AsRef + Copy) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(path))\n }\n\n fn is_file_included(&self, path: impl AsRef + Copy) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(path))\n && !self.is_excluded(path)\n }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let root_component_size = self.root_path.components().count();\n let mut dirs = Vec::new();\n dirs.push(Cow::Borrowed(&self.root_path));\n let mut new_dirs = Vec::new();\n try_stream! {\n while let Some(dir) = dirs.pop() {\n let mut entries = tokio::fs::read_dir(dir.as_ref()).await?;\n while let Some(entry) = entries.next_entry().await? 
{\n let path = entry.path();\n let mut path_components = path.components();\n for _ in 0..root_component_size {\n path_components.next();\n }\n let relative_path = path_components.as_path();\n if path.is_dir() {\n if !self.is_excluded(relative_path) {\n new_dirs.push(Cow::Owned(path));\n }\n } else if self.is_file_included(relative_path) {\n let ordinal: Option = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n if let Some(relative_path) = relative_path.to_str() {\n yield vec![PartialSourceRowMetadata {\n key: KeyValue::Str(relative_path.into()),\n ordinal,\n }];\n } else {\n warn!(\"Skipped ill-formed file path: {}\", path.display());\n }\n }\n }\n dirs.extend(new_dirs.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n if !self.is_file_included(key.str_value()?.as_ref()) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let path = self.root_path.join(key.str_value()?.as_ref());\n let ordinal = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n let value = if options.include_value {\n match std::fs::read(path) {\n Ok(content) => {\n let content = if self.binary {\n fields_value!(content)\n } else {\n fields_value!(String::from_utf8_lossy(&content).to_string())\n };\n Some(SourceValue::Existence(content))\n }\n Err(e) if e.kind() == std::io::ErrorKind::NotFound => {\n Some(SourceValue::NonExistence)\n }\n Err(e) => Err(e)?,\n }\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"LocalFile\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = 
StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor {\n root_path: PathBuf::from(spec.path),\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/execution/db_tracking_setup.rs", "use crate::prelude::*;\n\nuse crate::setup::{CombinedState, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType};\nuse serde::{Deserialize, Serialize};\nuse sqlx::PgPool;\n\npub fn default_tracking_table_name(flow_name: &str) -> String {\n format!(\n \"{}__cocoindex_tracking\",\n utils::db::sanitize_identifier(flow_name)\n )\n}\n\npub const CURRENT_TRACKING_TABLE_VERSION: i32 = 1;\n\nasync fn upgrade_tracking_table(\n pool: &PgPool,\n table_name: &str,\n existing_version_id: i32,\n target_version_id: i32,\n) -> Result<()> {\n if existing_version_id < 1 && target_version_id >= 1 {\n let query = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} (\n source_id INTEGER NOT NULL,\n source_key JSONB NOT NULL,\n\n -- Update in the precommit phase: after evaluation done, before really applying the changes 
to the target storage.\n max_process_ordinal BIGINT NOT NULL,\n staging_target_keys JSONB NOT NULL,\n memoization_info JSONB,\n\n -- Update after applying the changes to the target storage.\n processed_source_ordinal BIGINT,\n process_logic_fingerprint BYTEA,\n process_ordinal BIGINT,\n process_time_micros BIGINT,\n target_keys JSONB,\n\n PRIMARY KEY (source_id, source_key)\n );\",\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TrackingTableSetupState {\n pub table_name: String,\n pub version_id: i32,\n}\n\n#[derive(Debug)]\npub struct TrackingTableSetupStatus {\n pub desired_state: Option,\n\n pub legacy_table_names: Vec,\n\n pub min_existing_version_id: Option,\n pub source_ids_to_delete: Vec,\n}\n\nimpl TrackingTableSetupStatus {\n pub fn new(\n desired: Option<&TrackingTableSetupState>,\n existing: &CombinedState,\n source_ids_to_delete: Vec,\n ) -> Option {\n let legacy_table_names = existing\n .legacy_values(desired, |v| &v.table_name)\n .into_iter()\n .cloned()\n .collect();\n let min_existing_version_id = existing\n .always_exists()\n .then(|| existing.possible_versions().map(|v| v.version_id).min())\n .flatten();\n if desired.is_some() || min_existing_version_id.is_some() {\n Some(Self {\n desired_state: desired.cloned(),\n legacy_table_names,\n min_existing_version_id,\n source_ids_to_delete,\n })\n } else {\n None\n }\n }\n\n pub fn into_setup_info(\n self,\n ) -> ResourceSetupInfo<(), TrackingTableSetupState, TrackingTableSetupStatus> {\n ResourceSetupInfo {\n key: (),\n state: self.desired_state.clone(),\n description: \"Tracking Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for TrackingTableSetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n 
changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. \",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. \",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(_)) => SetupChangeType::Create,\n (Some(min_version_id), Some(desired)) => {\n if min_version_id == desired.version_id && self.legacy_table_names.is_empty() {\n SetupChangeType::NoChange\n } else if min_version_id < desired.version_id {\n SetupChangeType::Update\n } else {\n SetupChangeType::Invalid\n }\n }\n (Some(_), None) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n }\n }\n}\n\nimpl TrackingTableSetupStatus {\n pub async fn apply_change(&self) -> Result<()> {\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n if let Some(desired) = &self.desired_state {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\n \"ALTER TABLE IF EXISTS {} RENAME TO {}\",\n lagacy_name, desired.table_name\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n if 
self.min_existing_version_id != Some(desired.version_id) {\n upgrade_tracking_table(\n pool,\n &desired.table_name,\n self.min_existing_version_id.unwrap_or(0),\n desired.version_id,\n )\n .await?;\n }\n } else {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\"DROP TABLE IF EXISTS {lagacy_name}\");\n sqlx::query(&query).execute(pool).await?;\n }\n return Ok(());\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/base/schema.rs", "use crate::prelude::*;\n\nuse super::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct VectorTypeSchema {\n pub element_type: Box,\n pub dimension: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct UnionTypeSchema {\n pub types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum BasicValueType {\n /// A sequence of bytes in binary.\n Bytes,\n\n /// String encoded in UTF-8.\n Str,\n\n /// A boolean value.\n Bool,\n\n /// 64-bit integer.\n Int64,\n\n /// 32-bit floating point number.\n Float32,\n\n /// 64-bit floating point number.\n Float64,\n\n /// A range, with a start offset and a length.\n Range,\n\n /// A UUID.\n Uuid,\n\n /// Date (without time within the current day).\n Date,\n\n /// Time of the day.\n Time,\n\n /// Local date and time, without timezone.\n LocalDateTime,\n\n /// Date and time with timezone.\n OffsetDateTime,\n\n /// A time duration.\n TimeDelta,\n\n /// A JSON value.\n Json,\n\n /// A vector of values (usually numbers, for embeddings).\n Vector(VectorTypeSchema),\n\n /// A union\n Union(UnionTypeSchema),\n}\n\nimpl std::fmt::Display for BasicValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n BasicValueType::Bytes => write!(f, \"Bytes\"),\n BasicValueType::Str => write!(f, \"Str\"),\n BasicValueType::Bool => write!(f, \"Bool\"),\n BasicValueType::Int64 => write!(f, 
\"Int64\"),\n BasicValueType::Float32 => write!(f, \"Float32\"),\n BasicValueType::Float64 => write!(f, \"Float64\"),\n BasicValueType::Range => write!(f, \"Range\"),\n BasicValueType::Uuid => write!(f, \"Uuid\"),\n BasicValueType::Date => write!(f, \"Date\"),\n BasicValueType::Time => write!(f, \"Time\"),\n BasicValueType::LocalDateTime => write!(f, \"LocalDateTime\"),\n BasicValueType::OffsetDateTime => write!(f, \"OffsetDateTime\"),\n BasicValueType::TimeDelta => write!(f, \"TimeDelta\"),\n BasicValueType::Json => write!(f, \"Json\"),\n BasicValueType::Vector(s) => {\n write!(f, \"Vector[{}\", s.element_type)?;\n if let Some(dimension) = s.dimension {\n write!(f, \", {dimension}\")?;\n }\n write!(f, \"]\")\n }\n BasicValueType::Union(s) => {\n write!(f, \"Union[\")?;\n for (i, typ) in s.types.iter().enumerate() {\n if i > 0 {\n // Add type delimiter\n write!(f, \" | \")?;\n }\n write!(f, \"{typ}\")?;\n }\n write!(f, \"]\")\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct StructSchema {\n pub fields: Arc>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n}\n\nimpl StructSchema {\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: Arc::new(self.fields.iter().map(|f| f.without_attrs()).collect()),\n description: None,\n }\n }\n}\n\nimpl std::fmt::Display for StructSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Struct(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\n#[allow(clippy::enum_variant_names)]\npub enum TableKind {\n /// An table with unordered rows, without key.\n UTable,\n /// A table's first field is the key.\n #[serde(alias = \"Table\")]\n KTable,\n /// A table whose rows orders are preserved.\n #[serde(alias = \"List\")]\n 
LTable,\n}\n\nimpl std::fmt::Display for TableKind {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n TableKind::UTable => write!(f, \"Table\"),\n TableKind::KTable => write!(f, \"KTable\"),\n TableKind::LTable => write!(f, \"LTable\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct TableSchema {\n pub kind: TableKind,\n pub row: StructSchema,\n}\n\nimpl TableSchema {\n pub fn has_key(&self) -> bool {\n match self.kind {\n TableKind::KTable => true,\n TableKind::UTable | TableKind::LTable => false,\n }\n }\n\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self.kind {\n TableKind::KTable => self\n .row\n .fields\n .first()\n .as_ref()\n .map(|field| &field.value_type),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n kind: self.kind,\n row: self.row.without_attrs(),\n }\n }\n}\n\nimpl std::fmt::Display for TableSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}({})\", self.kind, self.row)\n }\n}\n\nimpl TableSchema {\n pub fn new(kind: TableKind, row: StructSchema) -> Self {\n Self { kind, row }\n }\n\n pub fn key_field(&self) -> Option<&FieldSchema> {\n match self.kind {\n TableKind::KTable => Some(self.row.fields.first().unwrap()),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum ValueType {\n Struct(StructSchema),\n\n #[serde(untagged)]\n Basic(BasicValueType),\n\n #[serde(untagged)]\n Table(TableSchema),\n}\n\nimpl ValueType {\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self {\n ValueType::Basic(_) => None,\n ValueType::Struct(_) => None,\n ValueType::Table(c) => c.key_type(),\n }\n }\n\n // Type equality, ignoring attributes.\n pub fn without_attrs(&self) -> Self {\n match self {\n ValueType::Basic(a) => 
ValueType::Basic(a.clone()),\n ValueType::Struct(a) => ValueType::Struct(a.without_attrs()),\n ValueType::Table(a) => ValueType::Table(a.without_attrs()),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct EnrichedValueType {\n #[serde(rename = \"type\")]\n pub typ: DataType,\n\n #[serde(default, skip_serializing_if = \"std::ops::Not::not\")]\n pub nullable: bool,\n\n #[serde(default, skip_serializing_if = \"BTreeMap::is_empty\")]\n pub attrs: Arc>,\n}\n\nimpl EnrichedValueType {\n pub fn without_attrs(&self) -> Self {\n Self {\n typ: self.typ.without_attrs(),\n nullable: self.nullable,\n attrs: Default::default(),\n }\n }\n}\n\nimpl EnrichedValueType {\n pub fn from_alternative(\n value_type: &EnrichedValueType,\n ) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n typ: (&value_type.typ).try_into()?,\n nullable: value_type.nullable,\n attrs: value_type.attrs.clone(),\n })\n }\n\n pub fn with_attr(mut self, key: &str, value: serde_json::Value) -> Self {\n Arc::make_mut(&mut self.attrs).insert(key.to_string(), value);\n self\n }\n}\n\nimpl std::fmt::Display for EnrichedValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.typ)?;\n if self.nullable {\n write!(f, \"?\")?;\n }\n if !self.attrs.is_empty() {\n write!(\n f,\n \" [{}]\",\n self.attrs\n .iter()\n .map(|(k, v)| format!(\"{k}: {v}\"))\n .collect::>()\n .join(\", \")\n )?;\n }\n Ok(())\n }\n}\n\nimpl std::fmt::Display for ValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ValueType::Basic(b) => write!(f, \"{b}\"),\n ValueType::Struct(s) => write!(f, \"{s}\"),\n ValueType::Table(c) => write!(f, \"{c}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct FieldSchema {\n /// ID is used to identify the field in the schema.\n pub name: FieldName,\n\n #[serde(flatten)]\n pub value_type: EnrichedValueType,\n}\n\nimpl 
FieldSchema {\n pub fn new(name: impl ToString, value_type: EnrichedValueType) -> Self {\n Self {\n name: name.to_string(),\n value_type,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n name: self.name.clone(),\n value_type: self.value_type.without_attrs(),\n }\n }\n}\n\nimpl FieldSchema {\n pub fn from_alternative(field: &FieldSchema) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n name: field.name.clone(),\n value_type: EnrichedValueType::from_alternative(&field.value_type)?,\n })\n }\n}\n\nimpl std::fmt::Display for FieldSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.name, self.value_type)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct CollectorSchema {\n pub fields: Vec,\n /// If specified, the collector will have an automatically generated UUID field with the given index.\n pub auto_uuid_field_idx: Option,\n}\n\nimpl std::fmt::Display for CollectorSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Collector(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\nimpl CollectorSchema {\n pub fn from_fields(fields: Vec, auto_uuid_field: Option) -> Self {\n let mut fields = fields;\n let auto_uuid_field_idx = if let Some(auto_uuid_field) = auto_uuid_field {\n fields.insert(\n 0,\n FieldSchema::new(\n auto_uuid_field,\n EnrichedValueType {\n typ: ValueType::Basic(BasicValueType::Uuid),\n nullable: false,\n attrs: Default::default(),\n },\n ),\n );\n Some(0)\n } else {\n None\n };\n Self {\n fields,\n auto_uuid_field_idx,\n }\n }\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: self.fields.iter().map(|f| f.without_attrs()).collect(),\n auto_uuid_field_idx: self.auto_uuid_field_idx,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct OpScopeSchema {\n 
/// Output schema for ops with output.\n pub op_output_types: HashMap,\n\n /// Child op scope for foreach ops.\n pub op_scopes: HashMap>,\n\n /// Collectors for the current scope.\n pub collectors: Vec>>,\n}\n\n/// Top-level schema for a flow instance.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowSchema {\n pub schema: StructSchema,\n\n pub root_op_scope: OpScopeSchema,\n}\n\nimpl std::ops::Deref for FlowSchema {\n type Target = StructSchema;\n\n fn deref(&self) -> &Self::Target {\n &self.schema\n }\n}\n\npub struct OpArgSchema {\n pub name: OpArgName,\n pub value_type: EnrichedValueType,\n pub analyzed_value: AnalyzedValueMapping,\n}\n"], ["/cocoindex/src/py/convert.rs", "use crate::prelude::*;\n\nuse bytes::Bytes;\nuse numpy::{PyArray1, PyArrayDyn, PyArrayMethods};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::exceptions::PyTypeError;\nuse pyo3::types::PyAny;\nuse pyo3::types::{PyList, PyTuple};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pythonize::{depythonize, pythonize};\nuse serde::de::DeserializeOwned;\nuse std::ops::Deref;\n\nuse super::IntoPyResult;\n\n#[derive(Debug)]\npub struct Pythonized(pub T);\n\nimpl<'py, T: DeserializeOwned> FromPyObject<'py> for Pythonized {\n fn extract_bound(obj: &Bound<'py, PyAny>) -> PyResult {\n Ok(Pythonized(depythonize(obj).into_py_result()?))\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for &Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n pythonize(py, &self.0).into_py_result()\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n (&self).into_pyobject(py)\n }\n}\n\nimpl Pythonized {\n pub fn into_inner(self) -> T {\n self.0\n }\n}\n\nimpl Deref for Pythonized {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.0\n 
}\n}\n\nfn basic_value_to_py_object<'py>(\n py: Python<'py>,\n v: &value::BasicValue,\n) -> PyResult> {\n let result = match v {\n value::BasicValue::Bytes(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Str(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Bool(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Int64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float32(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Range(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Uuid(uuid_val) => uuid_val.into_bound_py_any(py)?,\n value::BasicValue::Date(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Time(v) => v.into_bound_py_any(py)?,\n value::BasicValue::LocalDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::OffsetDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::TimeDelta(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Json(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Vector(v) => handle_vector_to_py(py, v)?,\n value::BasicValue::UnionVariant { tag_id, value } => {\n (*tag_id, basic_value_to_py_object(py, value)?).into_bound_py_any(py)?\n }\n };\n Ok(result)\n}\n\npub fn field_values_to_py_object<'py, 'a>(\n py: Python<'py>,\n values: impl Iterator,\n) -> PyResult> {\n let fields = values\n .map(|v| value_to_py_object(py, v))\n .collect::>>()?;\n Ok(PyTuple::new(py, fields)?.into_any())\n}\n\npub fn value_to_py_object<'py>(py: Python<'py>, v: &value::Value) -> PyResult> {\n let result = match v {\n value::Value::Null => py.None().into_bound(py),\n value::Value::Basic(v) => basic_value_to_py_object(py, v)?,\n value::Value::Struct(v) => field_values_to_py_object(py, v.fields.iter())?,\n value::Value::UTable(v) | value::Value::LTable(v) => {\n let rows = v\n .iter()\n .map(|v| field_values_to_py_object(py, v.0.fields.iter()))\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n value::Value::KTable(v) => {\n let 
rows = v\n .iter()\n .map(|(k, v)| {\n field_values_to_py_object(\n py,\n std::iter::once(&value::Value::from(k.clone())).chain(v.0.fields.iter()),\n )\n })\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n };\n Ok(result)\n}\n\nfn basic_value_from_py_object<'py>(\n typ: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = match typ {\n schema::BasicValueType::Bytes => {\n value::BasicValue::Bytes(Bytes::from(v.extract::>()?))\n }\n schema::BasicValueType::Str => value::BasicValue::Str(Arc::from(v.extract::()?)),\n schema::BasicValueType::Bool => value::BasicValue::Bool(v.extract::()?),\n schema::BasicValueType::Int64 => value::BasicValue::Int64(v.extract::()?),\n schema::BasicValueType::Float32 => value::BasicValue::Float32(v.extract::()?),\n schema::BasicValueType::Float64 => value::BasicValue::Float64(v.extract::()?),\n schema::BasicValueType::Range => value::BasicValue::Range(depythonize(v)?),\n schema::BasicValueType::Uuid => value::BasicValue::Uuid(v.extract::()?),\n schema::BasicValueType::Date => value::BasicValue::Date(v.extract::()?),\n schema::BasicValueType::Time => value::BasicValue::Time(v.extract::()?),\n schema::BasicValueType::LocalDateTime => {\n value::BasicValue::LocalDateTime(v.extract::()?)\n }\n schema::BasicValueType::OffsetDateTime => {\n if v.getattr_opt(\"tzinfo\")?\n .ok_or_else(|| {\n PyErr::new::(format!(\n \"expecting a datetime.datetime value, got {}\",\n v.get_type()\n ))\n })?\n .is_none()\n {\n value::BasicValue::OffsetDateTime(\n v.extract::()?.and_utc().into(),\n )\n } else {\n value::BasicValue::OffsetDateTime(\n v.extract::>()?,\n )\n }\n }\n schema::BasicValueType::TimeDelta => {\n value::BasicValue::TimeDelta(v.extract::()?)\n }\n schema::BasicValueType::Json => {\n value::BasicValue::Json(Arc::from(depythonize::(v)?))\n }\n schema::BasicValueType::Vector(elem) => {\n if let Some(vector) = handle_ndarray_from_py(&elem.element_type, v)? 
{\n vector\n } else {\n // Fallback to list\n value::BasicValue::Vector(Arc::from(\n v.extract::>>()?\n .into_iter()\n .map(|v| basic_value_from_py_object(&elem.element_type, &v))\n .collect::>>()?,\n ))\n }\n }\n schema::BasicValueType::Union(s) => {\n let mut valid_value = None;\n\n // Try parsing the value\n for (i, typ) in s.types.iter().enumerate() {\n if let Ok(value) = basic_value_from_py_object(typ, v) {\n valid_value = Some(value::BasicValue::UnionVariant {\n tag_id: i,\n value: Box::new(value),\n });\n break;\n }\n }\n\n valid_value.ok_or_else(|| {\n PyErr::new::(format!(\n \"invalid union value: {}, available types: {:?}\",\n v, s.types\n ))\n })?\n }\n };\n Ok(result)\n}\n\n// Helper function to convert PyAny to BasicValue for NDArray\nfn handle_ndarray_from_py<'py>(\n elem_type: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult> {\n macro_rules! try_convert {\n ($t:ty, $cast:expr) => {\n if let Ok(array) = v.downcast::>() {\n let data = array.readonly().as_slice()?.to_vec();\n let vec = data.into_iter().map($cast).collect::>();\n return Ok(Some(value::BasicValue::Vector(Arc::from(vec))));\n }\n };\n }\n\n match *elem_type {\n schema::BasicValueType::Float32 => try_convert!(f32, value::BasicValue::Float32),\n schema::BasicValueType::Float64 => try_convert!(f64, value::BasicValue::Float64),\n schema::BasicValueType::Int64 => try_convert!(i64, value::BasicValue::Int64),\n _ => {}\n }\n\n Ok(None)\n}\n\n// Helper function to convert BasicValue::Vector to PyAny\nfn handle_vector_to_py<'py>(\n py: Python<'py>,\n v: &[value::BasicValue],\n) -> PyResult> {\n match v.first() {\n Some(value::BasicValue::Float32(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float32(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float32\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Float64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n 
value::BasicValue::Float64(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Int64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Int64(i) => Ok(*i),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Int64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n _ => Ok(v\n .iter()\n .map(|v| basic_value_to_py_object(py, v))\n .collect::>>()?\n .into_bound_py_any(py)?),\n }\n}\n\nfn field_values_from_py_object<'py>(\n schema: &schema::StructSchema,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let list = v.extract::>>()?;\n if list.len() != schema.fields.len() {\n return Err(PyException::new_err(format!(\n \"struct field number mismatch, expected {}, got {}\",\n schema.fields.len(),\n list.len()\n )));\n }\n\n Ok(value::FieldValues {\n fields: schema\n .fields\n .iter()\n .zip(list.into_iter())\n .map(|(f, v)| value_from_py_object(&f.value_type.typ, &v))\n .collect::>>()?,\n })\n}\n\npub fn value_from_py_object<'py>(\n typ: &schema::ValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = if v.is_none() {\n value::Value::Null\n } else {\n match typ {\n schema::ValueType::Basic(typ) => {\n value::Value::Basic(basic_value_from_py_object(typ, v)?)\n }\n schema::ValueType::Struct(schema) => {\n value::Value::Struct(field_values_from_py_object(schema, v)?)\n }\n schema::ValueType::Table(schema) => {\n let list = v.extract::>>()?;\n let values = list\n .into_iter()\n .map(|v| field_values_from_py_object(&schema.row, &v))\n .collect::>>()?;\n\n match schema.kind {\n schema::TableKind::UTable => {\n value::Value::UTable(values.into_iter().map(|v| v.into()).collect())\n }\n schema::TableKind::LTable => {\n value::Value::LTable(values.into_iter().map(|v| v.into()).collect())\n }\n\n schema::TableKind::KTable => value::Value::KTable(\n values\n .into_iter()\n 
.map(|v| {\n let mut iter = v.fields.into_iter();\n let key = iter.next().unwrap().into_key().into_py_result()?;\n Ok((\n key,\n value::ScopeValue(value::FieldValues {\n fields: iter.collect::>(),\n }),\n ))\n })\n .collect::>>()?,\n ),\n }\n }\n }\n };\n Ok(result)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::base::schema;\n use crate::base::value;\n use crate::base::value::ScopeValue;\n use pyo3::Python;\n use std::collections::BTreeMap;\n use std::sync::Arc;\n\n fn assert_roundtrip_conversion(original_value: &value::Value, value_type: &schema::ValueType) {\n Python::with_gil(|py| {\n // Convert Rust value to Python object using value_to_py_object\n let py_object = value_to_py_object(py, original_value)\n .expect(\"Failed to convert Rust value to Python object\");\n\n println!(\"Python object: {py_object:?}\");\n let roundtripped_value = value_from_py_object(value_type, &py_object)\n .expect(\"Failed to convert Python object back to Rust value\");\n\n println!(\"Roundtripped value: {roundtripped_value:?}\");\n assert_eq!(\n original_value, &roundtripped_value,\n \"Value mismatch after roundtrip\"\n );\n });\n }\n\n #[test]\n fn test_roundtrip_basic_values() {\n let values_and_types = vec![\n (\n value::Value::Basic(value::BasicValue::Int64(42)),\n schema::ValueType::Basic(schema::BasicValueType::Int64),\n ),\n (\n value::Value::Basic(value::BasicValue::Float64(3.14)),\n schema::ValueType::Basic(schema::BasicValueType::Float64),\n ),\n (\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"hello\"))),\n schema::ValueType::Basic(schema::BasicValueType::Str),\n ),\n (\n value::Value::Basic(value::BasicValue::Bool(true)),\n schema::ValueType::Basic(schema::BasicValueType::Bool),\n ),\n ];\n\n for (val, typ) in values_and_types {\n assert_roundtrip_conversion(&val, &typ);\n }\n }\n\n #[test]\n fn test_roundtrip_struct() {\n let struct_schema = schema::StructSchema {\n description: Some(Arc::from(\"Test struct description\")),\n fields: 
Arc::new(vec![\n schema::FieldSchema {\n name: \"a\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"b\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n };\n\n let struct_val_data = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(10)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"world\"))),\n ],\n };\n\n let struct_val = value::Value::Struct(struct_val_data);\n let struct_typ = schema::ValueType::Struct(struct_schema); // No clone needed\n\n assert_roundtrip_conversion(&struct_val, &struct_typ);\n }\n\n #[test]\n fn test_roundtrip_table_types() {\n let row_schema_struct = Arc::new(schema::StructSchema {\n description: Some(Arc::from(\"Test table row description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"key_col\".to_string(), // Will be used as key for KTable implicitly\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_1\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_2\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Bool),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n });\n\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(1)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n 
value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(2)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // UTable\n let utable_schema = schema::TableSchema {\n kind: schema::TableKind::UTable,\n row: (*row_schema_struct).clone(),\n };\n let utable_val = value::Value::UTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let utable_typ = schema::ValueType::Table(utable_schema);\n assert_roundtrip_conversion(&utable_val, &utable_typ);\n\n // LTable\n let ltable_schema = schema::TableSchema {\n kind: schema::TableKind::LTable,\n row: (*row_schema_struct).clone(),\n };\n let ltable_val = value::Value::LTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let ltable_typ = schema::ValueType::Table(ltable_schema);\n assert_roundtrip_conversion(<able_val, <able_typ);\n\n // KTable\n let ktable_schema = schema::TableSchema {\n kind: schema::TableKind::KTable,\n row: (*row_schema_struct).clone(),\n };\n let mut ktable_data = BTreeMap::new();\n\n // Create KTable entries where the ScopeValue doesn't include the key field\n // This matches how the Python code will serialize/deserialize\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // For KTable, the key is 
extracted from the first field of ScopeValue based on current serialization\n let key1 = value::Value::::Basic(value::BasicValue::Int64(1))\n .into_key()\n .unwrap();\n let key2 = value::Value::::Basic(value::BasicValue::Int64(2))\n .into_key()\n .unwrap();\n\n ktable_data.insert(key1, row1_scope_val.clone());\n ktable_data.insert(key2, row2_scope_val.clone());\n\n let ktable_val = value::Value::KTable(ktable_data);\n let ktable_typ = schema::ValueType::Table(ktable_schema);\n assert_roundtrip_conversion(&ktable_val, &ktable_typ);\n }\n}\n"], ["/cocoindex/src/ops/functions/test_utils.rs", "use crate::builder::plan::{\n AnalyzedFieldReference, AnalyzedLocalFieldReference, AnalyzedValueMapping,\n};\nuse crate::ops::sdk::{\n AuthRegistry, BasicValueType, EnrichedValueType, FlowInstanceContext, OpArgSchema,\n OpArgsResolver, SimpleFunctionExecutor, SimpleFunctionFactoryBase, Value, make_output_type,\n};\nuse anyhow::Result;\nuse serde::de::DeserializeOwned;\nuse std::sync::Arc;\n\n// This function builds an argument schema for a flow function.\npub fn build_arg_schema(\n name: &str,\n value_type: BasicValueType,\n) -> (Option<&str>, EnrichedValueType) {\n (Some(name), make_output_type(value_type))\n}\n\n// This function tests a flow function by providing a spec, input argument schemas, and values.\npub async fn test_flow_function(\n factory: Arc,\n spec: S,\n input_arg_schemas: Vec<(Option<&str>, EnrichedValueType)>,\n input_arg_values: Vec,\n) -> Result\nwhere\n S: DeserializeOwned + Send + Sync + 'static,\n R: Send + Sync + 'static,\n F: SimpleFunctionFactoryBase + ?Sized,\n{\n // 1. 
Construct OpArgSchema\n let op_arg_schemas: Vec = input_arg_schemas\n .into_iter()\n .enumerate()\n .map(|(idx, (name, value_type))| OpArgSchema {\n name: name.map_or(crate::base::spec::OpArgName(None), |n| {\n crate::base::spec::OpArgName(Some(n.to_string()))\n }),\n value_type,\n analyzed_value: AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: AnalyzedLocalFieldReference {\n fields_idx: vec![idx as u32],\n },\n scope_up_level: 0,\n }),\n })\n .collect();\n\n // 2. Resolve Schema & Args\n let mut args_resolver = OpArgsResolver::new(&op_arg_schemas)?;\n let context = Arc::new(FlowInstanceContext {\n flow_instance_name: \"test_flow_function\".to_string(),\n auth_registry: Arc::new(AuthRegistry::default()),\n py_exec_ctx: None,\n });\n\n let (resolved_args_from_schema, _output_schema): (R, EnrichedValueType) = factory\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n\n args_resolver.done()?;\n\n // 3. Build Executor\n let executor: Box = factory\n .build_executor(spec, resolved_args_from_schema, Arc::clone(&context))\n .await?;\n\n // 4. 
Evaluate\n let result = executor.evaluate(input_arg_values).await?;\n\n Ok(result)\n}\n"], ["/cocoindex/src/llm/gemini.rs", "use crate::prelude::*;\n\nuse crate::llm::{\n LlmEmbeddingClient, LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse base64::prelude::*;\nuse google_cloud_aiplatform_v1 as vertexai;\nuse serde_json::Value;\nuse urlencoding::encode;\n\nfn get_embedding_dimension(model: &str) -> Option {\n let model = model.to_ascii_lowercase();\n if model.starts_with(\"gemini-embedding-\") {\n Some(3072)\n } else if model.starts_with(\"text-embedding-\") {\n Some(768)\n } else if model.starts_with(\"embedding-\") {\n Some(768)\n } else if model.starts_with(\"text-multilingual-embedding-\") {\n Some(768)\n } else {\n None\n }\n}\n\npub struct AiStudioClient {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl AiStudioClient {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Gemini doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"GEMINI_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"GEMINI_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n// Recursively remove all `additionalProperties` fields from a JSON value\nfn remove_additional_properties(value: &mut Value) {\n match value {\n Value::Object(map) => {\n map.remove(\"additionalProperties\");\n for v in map.values_mut() {\n remove_additional_properties(v);\n }\n }\n Value::Array(arr) => {\n for v in arr {\n remove_additional_properties(v);\n }\n }\n _ => {}\n }\n}\n\nimpl AiStudioClient {\n fn get_api_url(&self, model: &str, api_name: &str) -> String {\n format!(\n \"https://generativelanguage.googleapis.com/v1beta/models/{}:{}?key={}\",\n encode(model),\n api_name,\n encode(&self.api_key)\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for AiStudioClient {\n async fn 
generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_parts: Vec = Vec::new();\n\n // Add text part first\n user_parts.push(serde_json::json!({ \"text\": request.user_prompt }));\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_parts.push(serde_json::json!({\n \"inlineData\": {\n \"mimeType\": mime_type,\n \"data\": base64_image\n }\n }));\n }\n\n // Compose the contents\n let contents = vec![serde_json::json!({\n \"role\": \"user\",\n \"parts\": user_parts\n })];\n\n // Prepare payload\n let mut payload = serde_json::json!({ \"contents\": contents });\n if let Some(system) = request.system_prompt {\n payload[\"systemInstruction\"] = serde_json::json!({\n \"parts\": [ { \"text\": system } ]\n });\n }\n\n // If structured output is requested, add schema and responseMimeType\n if let Some(OutputFormat::JsonSchema { schema, .. 
}) = &request.output_format {\n let mut schema_json = serde_json::to_value(schema)?;\n remove_additional_properties(&mut schema_json);\n payload[\"generationConfig\"] = serde_json::json!({\n \"responseMimeType\": \"application/json\",\n \"responseSchema\": schema_json\n });\n }\n\n let url = self.get_api_url(request.model, \"generateContent\");\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let resp_json: Value = resp.json().await.context(\"Invalid JSON\")?;\n\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Gemini API error: {:?}\", error);\n }\n let mut resp_json = resp_json;\n let text = match &mut resp_json[\"candidates\"][0][\"content\"][\"parts\"][0][\"text\"] {\n Value::String(s) => std::mem::take(s),\n _ => bail!(\"No text in response\"),\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[derive(Deserialize)]\nstruct ContentEmbedding {\n values: Vec,\n}\n#[derive(Deserialize)]\nstruct EmbedContentResponse {\n embedding: ContentEmbedding,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for AiStudioClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = self.get_api_url(request.model, \"embedContent\");\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"content\": { \"parts\": [{ \"text\": request.text }] },\n });\n if let Some(task_type) = request.task_type {\n payload[\"taskType\"] = serde_json::Value::String(task_type.into());\n }\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n 
&retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: EmbedContentResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n\npub struct VertexAiClient {\n client: vertexai::client::PredictionService,\n config: super::VertexAiConfig,\n}\n\nimpl VertexAiClient {\n pub async fn new(\n address: Option,\n api_config: Option,\n ) -> Result {\n if address.is_some() {\n api_bail!(\"VertexAi API address is not supported for VertexAi API type\");\n }\n let Some(super::LlmApiConfig::VertexAi(config)) = api_config else {\n api_bail!(\"VertexAi API config is required for VertexAi API type\");\n };\n let client = vertexai::client::PredictionService::builder()\n .build()\n .await?;\n Ok(Self { client, config })\n }\n\n fn get_model_path(&self, model: &str) -> String {\n format!(\n \"projects/{}/locations/{}/publishers/google/models/{}\",\n self.config.project,\n self.config.region.as_deref().unwrap_or(\"global\"),\n model\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for VertexAiClient {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n use vertexai::model::{Blob, Content, GenerationConfig, Part, Schema, part::Data};\n\n // Compose parts\n let mut parts = Vec::new();\n // Add text part\n parts.push(Part::new().set_text(request.user_prompt.to_string()));\n // Add image part if present\n if let Some(image_bytes) = request.image {\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n parts.push(\n Part::new().set_inline_data(\n Blob::new()\n .set_data(image_bytes.into_owned())\n .set_mime_type(mime_type.to_string()),\n ),\n );\n }\n // Compose content\n let mut contents = 
Vec::new();\n contents.push(Content::new().set_role(\"user\".to_string()).set_parts(parts));\n // Compose system instruction if present\n let system_instruction = request.system_prompt.as_ref().map(|sys| {\n Content::new()\n .set_role(\"system\".to_string())\n .set_parts(vec![Part::new().set_text(sys.to_string())])\n });\n\n // Compose generation config\n let mut generation_config = None;\n if let Some(OutputFormat::JsonSchema { schema, .. }) = &request.output_format {\n let schema_json = serde_json::to_value(schema)?;\n generation_config = Some(\n GenerationConfig::new()\n .set_response_mime_type(\"application/json\".to_string())\n .set_response_schema(serde_json::from_value::(schema_json)?),\n );\n }\n\n let mut req = self\n .client\n .generate_content()\n .set_model(self.get_model_path(request.model))\n .set_contents(contents);\n if let Some(sys) = system_instruction {\n req = req.set_system_instruction(sys);\n }\n if let Some(config) = generation_config {\n req = req.set_generation_config(config);\n }\n\n // Call the API\n let resp = req.send().await?;\n // Extract text from response\n let Some(Data::Text(text)) = resp\n .candidates\n .into_iter()\n .next()\n .and_then(|c| c.content)\n .and_then(|content| content.parts.into_iter().next())\n .and_then(|part| part.data)\n else {\n bail!(\"No text in response\");\n };\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for VertexAiClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n // Create the instances for the request\n let mut instance = serde_json::json!({\n \"content\": request.text\n });\n // Add task type if specified\n if let Some(task_type) = &request.task_type {\n instance[\"task_type\"] = 
serde_json::Value::String(task_type.to_string());\n }\n\n let instances = vec![instance];\n\n // Prepare the request parameters\n let mut parameters = serde_json::json!({});\n if let Some(output_dimension) = request.output_dimension {\n parameters[\"outputDimensionality\"] = serde_json::Value::Number(output_dimension.into());\n }\n\n // Build the prediction request using the raw predict builder\n let response = self\n .client\n .predict()\n .set_endpoint(self.get_model_path(request.model))\n .set_instances(instances)\n .set_parameters(parameters)\n .send()\n .await?;\n\n // Extract the embedding from the response\n let embeddings = response\n .predictions\n .into_iter()\n .next()\n .and_then(|mut e| e.get_mut(\"embeddings\").map(|v| v.take()))\n .ok_or_else(|| anyhow::anyhow!(\"No embeddings in response\"))?;\n let embedding: ContentEmbedding = serde_json::from_value(embeddings)?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/functions/embed_text.rs", "use crate::{\n llm::{\n LlmApiConfig, LlmApiType, LlmEmbeddingClient, LlmEmbeddingRequest, new_llm_embedding_client,\n },\n ops::sdk::*,\n};\n\n#[derive(Deserialize)]\nstruct Spec {\n api_type: LlmApiType,\n model: String,\n address: Option,\n api_config: Option,\n output_dimension: Option,\n task_type: Option,\n}\n\nstruct Args {\n client: Box,\n text: ResolvedOpArg,\n}\n\nstruct Executor {\n spec: Spec,\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let req = LlmEmbeddingRequest {\n model: &self.spec.model,\n text: Cow::Borrowed(text),\n output_dimension: self.spec.output_dimension,\n task_type: self\n .spec\n 
.task_type\n .as_ref()\n .map(|s| Cow::Borrowed(s.as_str())),\n };\n let embedding = self.args.client.embed_text(req).await?;\n Ok(embedding.embedding.into())\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"EmbedText\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)> {\n let text = args_resolver.next_arg(\"text\")?;\n let client =\n new_llm_embedding_client(spec.api_type, spec.address.clone(), spec.api_config.clone())\n .await?;\n let output_dimension = match spec.output_dimension {\n Some(output_dimension) => output_dimension,\n None => {\n client.get_default_embedding_dimension(spec.model.as_str())\n .ok_or_else(|| api_error!(\"model \\\"{}\\\" is unknown for {:?}, needs to specify `output_dimension` explicitly\", spec.model, spec.api_type))?\n }\n };\n let output_schema = make_output_type(BasicValueType::Vector(VectorTypeSchema {\n dimension: Some(output_dimension as usize),\n element_type: Box::new(BasicValueType::Float32),\n }));\n Ok((Args { client, text }, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { spec, args }))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_embed_text() {\n let spec = Spec {\n api_type: LlmApiType::OpenAi,\n model: \"text-embedding-ada-002\".to_string(),\n address: None,\n api_config: None,\n output_dimension: None,\n task_type: None,\n 
};\n\n let factory = Arc::new(Factory);\n let text_content = \"CocoIndex is a performant data transformation framework for AI.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_embed_text: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Vector(arc_vec)) => {\n assert_eq!(arc_vec.len(), 1536, \"Embedding vector dimension mismatch\");\n for item in arc_vec.iter() {\n match item {\n BasicValue::Float32(_) => {}\n _ => panic!(\"Embedding vector element is not Float32: {item:?}\"),\n }\n }\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Vector), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/execution/memoization.rs", "use anyhow::{Result, bail};\nuse serde::{Deserialize, Serialize};\nuse std::{\n borrow::Cow,\n collections::HashMap,\n future::Future,\n sync::{Arc, Mutex},\n};\n\nuse crate::{\n base::{schema, value},\n service::error::{SharedError, SharedResultExtRef},\n utils::fingerprint::{Fingerprint, Fingerprinter},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StoredCacheEntry {\n time_sec: i64,\n value: serde_json::Value,\n}\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct StoredMemoizationInfo {\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub cache: HashMap,\n\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub uuids: HashMap>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub content_hash: Option,\n}\n\npub 
type CacheEntryCell = Arc>>;\nenum CacheData {\n /// Existing entry in previous runs, but not in current run yet.\n Previous(serde_json::Value),\n /// Value appeared in current run.\n Current(CacheEntryCell),\n}\n\nstruct CacheEntry {\n time: chrono::DateTime,\n data: CacheData,\n}\n\n#[derive(Default)]\nstruct UuidEntry {\n uuids: Vec,\n num_current: usize,\n}\n\nimpl UuidEntry {\n fn new(uuids: Vec) -> Self {\n Self {\n uuids,\n num_current: 0,\n }\n }\n\n fn into_stored(self) -> Option> {\n if self.num_current == 0 {\n return None;\n }\n let mut uuids = self.uuids;\n if self.num_current < uuids.len() {\n uuids.truncate(self.num_current);\n }\n Some(uuids)\n }\n}\n\npub struct EvaluationMemoryOptions {\n pub enable_cache: bool,\n\n /// If true, it's for evaluation only.\n /// In this mode, we don't memoize anything.\n pub evaluation_only: bool,\n}\n\npub struct EvaluationMemory {\n current_time: chrono::DateTime,\n cache: Option>>,\n uuids: Mutex>,\n evaluation_only: bool,\n}\n\nimpl EvaluationMemory {\n pub fn new(\n current_time: chrono::DateTime,\n stored_info: Option,\n options: EvaluationMemoryOptions,\n ) -> Self {\n let (stored_cache, stored_uuids) = stored_info\n .map(|stored_info| (stored_info.cache, stored_info.uuids))\n .unzip();\n Self {\n current_time,\n cache: options.enable_cache.then(|| {\n Mutex::new(\n stored_cache\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, e)| {\n (\n k,\n CacheEntry {\n time: chrono::DateTime::from_timestamp(e.time_sec, 0)\n .unwrap_or(chrono::DateTime::::MIN_UTC),\n data: CacheData::Previous(e.value),\n },\n )\n })\n .collect(),\n )\n }),\n uuids: Mutex::new(\n (!options.evaluation_only)\n .then_some(stored_uuids)\n .flatten()\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, v)| (k, UuidEntry::new(v)))\n .collect(),\n ),\n evaluation_only: options.evaluation_only,\n }\n }\n\n pub fn into_stored(self) -> Result {\n if self.evaluation_only {\n bail!(\"For evaluation only, cannot convert to 
stored MemoizationInfo\");\n }\n let cache = if let Some(cache) = self.cache {\n cache\n .into_inner()?\n .into_iter()\n .filter_map(|(k, e)| match e.data {\n CacheData::Previous(_) => None,\n CacheData::Current(entry) => match entry.get() {\n Some(Ok(v)) => Some(serde_json::to_value(v).map(|value| {\n (\n k,\n StoredCacheEntry {\n time_sec: e.time.timestamp(),\n value,\n },\n )\n })),\n _ => None,\n },\n })\n .collect::>()?\n } else {\n bail!(\"Cache is disabled, cannot convert to stored MemoizationInfo\");\n };\n let uuids = self\n .uuids\n .into_inner()?\n .into_iter()\n .filter_map(|(k, v)| v.into_stored().map(|uuids| (k, uuids)))\n .collect();\n Ok(StoredMemoizationInfo {\n cache,\n uuids,\n content_hash: None,\n })\n }\n\n pub fn get_cache_entry(\n &self,\n key: impl FnOnce() -> Result,\n typ: &schema::ValueType,\n ttl: Option,\n ) -> Result> {\n let mut cache = if let Some(cache) = &self.cache {\n cache.lock().unwrap()\n } else {\n return Ok(None);\n };\n let result = match cache.entry(key()?) 
{\n std::collections::hash_map::Entry::Occupied(mut entry)\n if !ttl\n .map(|ttl| entry.get().time + ttl < self.current_time)\n .unwrap_or(false) =>\n {\n let entry_mut = &mut entry.get_mut();\n match &mut entry_mut.data {\n CacheData::Previous(value) => {\n let value = value::Value::from_json(std::mem::take(value), typ)?;\n let cell = Arc::new(tokio::sync::OnceCell::from(Ok(value)));\n let time = entry_mut.time;\n entry.insert(CacheEntry {\n time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n CacheData::Current(cell) => cell.clone(),\n }\n }\n entry => {\n let cell = Arc::new(tokio::sync::OnceCell::new());\n entry.insert_entry(CacheEntry {\n time: self.current_time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n };\n Ok(Some(result))\n }\n\n pub fn next_uuid(&self, key: Fingerprint) -> Result {\n let mut uuids = self.uuids.lock().unwrap();\n\n let entry = uuids.entry(key).or_default();\n let uuid = if self.evaluation_only {\n let fp = Fingerprinter::default()\n .with(&key)?\n .with(&entry.num_current)?\n .into_fingerprint();\n uuid::Uuid::new_v8(fp.0)\n } else if entry.num_current < entry.uuids.len() {\n entry.uuids[entry.num_current]\n } else {\n let uuid = uuid::Uuid::new_v4();\n entry.uuids.push(uuid);\n uuid\n };\n entry.num_current += 1;\n Ok(uuid)\n }\n}\n\npub async fn evaluate_with_cell(\n cell: Option<&CacheEntryCell>,\n compute: impl FnOnce() -> Fut,\n) -> Result>\nwhere\n Fut: Future>,\n{\n let result = match cell {\n Some(cell) => Cow::Borrowed(\n cell.get_or_init(|| {\n let fut = compute();\n async move { fut.await.map_err(SharedError::new) }\n })\n .await\n .std_result()?,\n ),\n None => Cow::Owned(compute().await?),\n };\n Ok(result)\n}\n"], ["/cocoindex/src/base/json_schema.rs", "use crate::prelude::*;\n\nuse crate::utils::immutable::RefList;\nuse schemars::schema::{\n ArrayValidation, InstanceType, ObjectValidation, Schema, SchemaObject, SingleOrVec,\n SubschemaValidation,\n};\nuse std::fmt::Write;\n\npub struct 
ToJsonSchemaOptions {\n /// If true, mark all fields as required.\n /// Use union type (with `null`) for optional fields instead.\n /// Models like OpenAI will reject the schema if a field is not required.\n pub fields_always_required: bool,\n\n /// If true, the JSON schema supports the `format` keyword.\n pub supports_format: bool,\n\n /// If true, extract descriptions to a separate extra instruction.\n pub extract_descriptions: bool,\n\n /// If true, the top level must be a JSON object.\n pub top_level_must_be_object: bool,\n}\n\nstruct JsonSchemaBuilder {\n options: ToJsonSchemaOptions,\n extra_instructions_per_field: IndexMap,\n}\n\nimpl JsonSchemaBuilder {\n fn new(options: ToJsonSchemaOptions) -> Self {\n Self {\n options,\n extra_instructions_per_field: IndexMap::new(),\n }\n }\n\n fn set_description(\n &mut self,\n schema: &mut SchemaObject,\n description: impl ToString,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) {\n if self.options.extract_descriptions {\n let mut fields: Vec<_> = field_path.iter().map(|f| f.as_str()).collect();\n fields.reverse();\n self.extra_instructions_per_field\n .insert(fields.join(\".\"), description.to_string());\n } else {\n schema.metadata.get_or_insert_default().description = Some(description.to_string());\n }\n }\n\n fn for_basic_value_type(\n &mut self,\n basic_type: &schema::BasicValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n match basic_type {\n schema::BasicValueType::Str => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bytes => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bool => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Boolean)));\n }\n schema::BasicValueType::Int64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Integer)));\n }\n 
schema::BasicValueType::Float32 | schema::BasicValueType::Float64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Number)));\n }\n schema::BasicValueType::Range => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(\n InstanceType::Integer,\n ))),\n ..Default::default()\n }\n .into(),\n ))),\n min_items: Some(2),\n max_items: Some(2),\n ..Default::default()\n }));\n self.set_description(\n &mut schema,\n \"A range represented by a list of two positions, start pos (inclusive), end pos (exclusive).\",\n field_path,\n );\n }\n schema::BasicValueType::Uuid => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"uuid\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A UUID, e.g. 123e4567-e89b-12d3-a456-426614174000\",\n field_path,\n );\n }\n schema::BasicValueType::Date => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A date in YYYY-MM-DD format, e.g. 2025-03-27\",\n field_path,\n );\n }\n schema::BasicValueType::Time => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A time in HH:MM:SS format, e.g. 
13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::LocalDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time without timezone offset in YYYY-MM-DDTHH:MM:SS format, e.g. 2025-03-27T13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::OffsetDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time with timezone offset in RFC3339, e.g. 2025-03-27T13:32:12Z, 2025-03-27T07:32:12.313-06:00\",\n field_path,\n );\n }\n &schema::BasicValueType::TimeDelta => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"duration\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A duration, e.g. 'PT1H2M3S' (ISO 8601) or '1 day 2 hours 3 seconds'\",\n field_path,\n );\n }\n schema::BasicValueType::Json => {\n // Can be any value. 
No type constraint.\n }\n schema::BasicValueType::Vector(s) => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_basic_value_type(&s.element_type, field_path)\n .into(),\n ))),\n min_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n max_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n ..Default::default()\n }));\n }\n schema::BasicValueType::Union(s) => {\n schema.subschemas = Some(Box::new(SubschemaValidation {\n one_of: Some(\n s.types\n .iter()\n .map(|t| Schema::Object(self.for_basic_value_type(t, field_path)))\n .collect(),\n ),\n ..Default::default()\n }));\n }\n }\n schema\n }\n\n fn for_struct_schema(\n &mut self,\n struct_schema: &schema::StructSchema,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n if let Some(description) = &struct_schema.description {\n self.set_description(&mut schema, description, field_path);\n }\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Object)));\n schema.object = Some(Box::new(ObjectValidation {\n properties: struct_schema\n .fields\n .iter()\n .map(|f| {\n let mut schema =\n self.for_enriched_value_type(&f.value_type, field_path.prepend(&f.name));\n if self.options.fields_always_required && f.value_type.nullable {\n if let Some(instance_type) = &mut schema.instance_type {\n let mut types = match instance_type {\n SingleOrVec::Single(t) => vec![**t],\n SingleOrVec::Vec(t) => std::mem::take(t),\n };\n types.push(InstanceType::Null);\n *instance_type = SingleOrVec::Vec(types);\n }\n }\n (f.name.to_string(), schema.into())\n })\n .collect(),\n required: struct_schema\n .fields\n .iter()\n .filter(|&f| (self.options.fields_always_required || !f.value_type.nullable))\n .map(|f| f.name.to_string())\n .collect(),\n additional_properties: Some(Schema::Bool(false).into()),\n 
..Default::default()\n }));\n schema\n }\n\n fn for_value_type(\n &mut self,\n value_type: &schema::ValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n match value_type {\n schema::ValueType::Basic(b) => self.for_basic_value_type(b, field_path),\n schema::ValueType::Struct(s) => self.for_struct_schema(s, field_path),\n schema::ValueType::Table(c) => SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(InstanceType::Array))),\n array: Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_struct_schema(&c.row, field_path).into(),\n ))),\n ..Default::default()\n })),\n ..Default::default()\n },\n }\n }\n\n fn for_enriched_value_type(\n &mut self,\n enriched_value_type: &schema::EnrichedValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n self.for_value_type(&enriched_value_type.typ, field_path)\n }\n\n fn build_extra_instructions(&self) -> Result> {\n if self.extra_instructions_per_field.is_empty() {\n return Ok(None);\n }\n\n let mut instructions = String::new();\n write!(&mut instructions, \"Instructions for specific fields:\\n\\n\")?;\n for (field_path, instruction) in self.extra_instructions_per_field.iter() {\n write!(\n &mut instructions,\n \"- {}: {}\\n\\n\",\n if field_path.is_empty() {\n \"(root object)\"\n } else {\n field_path.as_str()\n },\n instruction\n )?;\n }\n Ok(Some(instructions))\n }\n}\n\npub struct ValueExtractor {\n value_type: schema::ValueType,\n object_wrapper_field_name: Option,\n}\n\nimpl ValueExtractor {\n pub fn extract_value(&self, json_value: serde_json::Value) -> Result {\n let unwrapped_json_value =\n if let Some(object_wrapper_field_name) = &self.object_wrapper_field_name {\n match json_value {\n serde_json::Value::Object(mut o) => o\n .remove(object_wrapper_field_name)\n .unwrap_or(serde_json::Value::Null),\n _ => {\n bail!(\"Field `{}` not found\", object_wrapper_field_name)\n }\n }\n } else {\n json_value\n };\n let result = 
value::Value::from_json(unwrapped_json_value, &self.value_type)?;\n Ok(result)\n }\n}\n\npub struct BuildJsonSchemaOutput {\n pub schema: SchemaObject,\n pub extra_instructions: Option,\n pub value_extractor: ValueExtractor,\n}\n\npub fn build_json_schema(\n value_type: schema::EnrichedValueType,\n options: ToJsonSchemaOptions,\n) -> Result {\n let mut builder = JsonSchemaBuilder::new(options);\n let (schema, object_wrapper_field_name) = if builder.options.top_level_must_be_object\n && !matches!(value_type.typ, schema::ValueType::Struct(_))\n {\n let object_wrapper_field_name = \"value\".to_string();\n let wrapper_struct = schema::StructSchema {\n fields: Arc::new(vec![schema::FieldSchema {\n name: object_wrapper_field_name.clone(),\n value_type: value_type.clone(),\n }]),\n description: None,\n };\n (\n builder.for_struct_schema(&wrapper_struct, RefList::Nil),\n Some(object_wrapper_field_name),\n )\n } else {\n (\n builder.for_enriched_value_type(&value_type, RefList::Nil),\n None,\n )\n };\n Ok(BuildJsonSchemaOutput {\n schema,\n extra_instructions: builder.build_extra_instructions()?,\n value_extractor: ValueExtractor {\n value_type: value_type.typ,\n object_wrapper_field_name,\n },\n })\n}\n"], ["/cocoindex/src/llm/anthropic.rs", "use crate::prelude::*;\nuse base64::prelude::*;\n\nuse crate::llm::{\n LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse anyhow::Context;\nuse urlencoding::encode;\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Anthropic doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"ANTHROPIC_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"ANTHROPIC_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl 
LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_content_parts: Vec = Vec::new();\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_content_parts.push(serde_json::json!({\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": mime_type,\n \"data\": base64_image,\n }\n }));\n }\n\n // Add text part\n user_content_parts.push(serde_json::json!({\n \"type\": \"text\",\n \"text\": request.user_prompt\n }));\n\n let messages = vec![serde_json::json!({\n \"role\": \"user\",\n \"content\": user_content_parts\n })];\n\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"messages\": messages,\n \"max_tokens\": 4096\n });\n\n // Add system prompt as top-level field if present (required)\n if let Some(system) = request.system_prompt {\n payload[\"system\"] = serde_json::json!(system);\n }\n\n // Extract schema from output_format, error if not JsonSchema\n let schema = match request.output_format.as_ref() {\n Some(OutputFormat::JsonSchema { schema, .. 
}) => schema,\n _ => api_bail!(\"Anthropic client expects OutputFormat::JsonSchema for all requests\"),\n };\n\n let schema_json = serde_json::to_value(schema)?;\n payload[\"tools\"] = serde_json::json!([\n { \"type\": \"custom\", \"name\": \"report_result\", \"input_schema\": schema_json }\n ]);\n\n let url = \"https://api.anthropic.com/v1/messages\";\n\n let encoded_api_key = encode(&self.api_key);\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"x-api-key\", encoded_api_key.as_ref())\n .header(\"anthropic-version\", \"2023-06-01\")\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Anthropic API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let mut resp_json: serde_json::Value = resp.json().await.context(\"Invalid JSON\")?;\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Anthropic API error: {:?}\", error);\n }\n\n // Debug print full response\n // println!(\"Anthropic API full response: {resp_json:?}\");\n\n let resp_content = &resp_json[\"content\"];\n let tool_name = \"report_result\";\n let mut extracted_json: Option = None;\n if let Some(array) = resp_content.as_array() {\n for item in array {\n if item.get(\"type\") == Some(&serde_json::Value::String(\"tool_use\".to_string()))\n && item.get(\"name\") == Some(&serde_json::Value::String(tool_name.to_string()))\n {\n if let Some(input) = item.get(\"input\") {\n extracted_json = Some(input.clone());\n break;\n }\n }\n }\n }\n let text = if let Some(json) = extracted_json {\n // Try strict JSON serialization first\n serde_json::to_string(&json)?\n } else {\n // Fallback: try text if no tool output found\n match &mut resp_json[\"content\"][0][\"text\"] {\n serde_json::Value::String(s) => {\n // Try strict JSON parsing first\n match serde_json::from_str::(s) {\n Ok(_) => std::mem::take(s),\n Err(e) => {\n // Try permissive json5 parsing as fallback\n match 
json5::from_str::(s) {\n Ok(value) => {\n println!(\"[Anthropic] Used permissive JSON5 parser for output\");\n serde_json::to_string(&value)?\n }\n Err(e2) => {\n return Err(anyhow::anyhow!(format!(\n \"No structured tool output or text found in response, and permissive JSON5 parsing also failed: {e}; {e2}\"\n )));\n }\n }\n }\n }\n }\n _ => {\n return Err(anyhow::anyhow!(\n \"No structured tool output or text found in response\"\n ));\n }\n }\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n"], ["/cocoindex/src/setup/components.rs", "use super::{CombinedState, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::prelude::*;\nuse std::fmt::Debug;\n\npub trait State: Debug + Send + Sync {\n fn key(&self) -> Key;\n}\n\n#[async_trait]\npub trait SetupOperator: 'static + Send + Sync {\n type Key: Debug + Hash + Eq + Clone + Send + Sync;\n type State: State;\n type SetupState: Send + Sync + IntoIterator;\n type Context: Sync;\n\n fn describe_key(&self, key: &Self::Key) -> String;\n\n fn describe_state(&self, state: &Self::State) -> String;\n\n fn is_up_to_date(&self, current: &Self::State, desired: &Self::State) -> bool;\n\n async fn create(&self, state: &Self::State, context: &Self::Context) -> Result<()>;\n\n async fn delete(&self, key: &Self::Key, context: &Self::Context) -> Result<()>;\n\n async fn update(&self, state: &Self::State, context: &Self::Context) -> Result<()> {\n self.delete(&state.key(), context).await?;\n self.create(state, context).await\n }\n}\n\n#[derive(Debug)]\nstruct CompositeStateUpsert {\n state: S,\n already_exists: bool,\n}\n\n#[derive(Derivative)]\n#[derivative(Debug)]\npub struct SetupStatus {\n #[derivative(Debug = \"ignore\")]\n desc: D,\n keys_to_delete: IndexSet,\n states_to_upsert: Vec>,\n}\n\nimpl SetupStatus 
{\n pub fn create(\n desc: D,\n desired: Option,\n existing: CombinedState,\n ) -> Result {\n let existing_component_states = CombinedState {\n current: existing.current.map(|s| {\n s.into_iter()\n .map(|s| (s.key(), s))\n .collect::>()\n }),\n staging: existing\n .staging\n .into_iter()\n .map(|s| match s {\n StateChange::Delete => StateChange::Delete,\n StateChange::Upsert(s) => {\n StateChange::Upsert(s.into_iter().map(|s| (s.key(), s)).collect())\n }\n })\n .collect(),\n legacy_state_key: existing.legacy_state_key,\n };\n let mut keys_to_delete = IndexSet::new();\n let mut states_to_upsert = vec![];\n\n // Collect all existing component keys\n for c in existing_component_states.possible_versions() {\n keys_to_delete.extend(c.keys().cloned());\n }\n\n if let Some(desired_state) = desired {\n for desired_comp_state in desired_state {\n let key = desired_comp_state.key();\n\n // Remove keys that should be kept from deletion list\n keys_to_delete.shift_remove(&key);\n\n // Add components that need to be updated\n let is_up_to_date = existing_component_states.always_exists()\n && existing_component_states.possible_versions().all(|v| {\n v.get(&key)\n .is_some_and(|s| desc.is_up_to_date(s, &desired_comp_state))\n });\n if !is_up_to_date {\n let already_exists = existing_component_states\n .possible_versions()\n .any(|v| v.contains_key(&key));\n states_to_upsert.push(CompositeStateUpsert {\n state: desired_comp_state,\n already_exists,\n });\n }\n }\n }\n\n Ok(Self {\n desc,\n keys_to_delete,\n states_to_upsert,\n })\n }\n}\n\nimpl ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n\n for key in &self.keys_to_delete {\n result.push(setup::ChangeDescription::Action(format!(\n \"Delete {}\",\n self.desc.describe_key(key)\n )));\n }\n\n for state in &self.states_to_upsert {\n result.push(setup::ChangeDescription::Action(format!(\n \"{} {}\",\n if state.already_exists {\n \"Update\"\n } else {\n \"Create\"\n },\n 
self.desc.describe_state(&state.state)\n )));\n }\n\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.keys_to_delete.is_empty() && self.states_to_upsert.is_empty() {\n SetupChangeType::NoChange\n } else if self.keys_to_delete.is_empty() {\n SetupChangeType::Create\n } else if self.states_to_upsert.is_empty() {\n SetupChangeType::Delete\n } else {\n SetupChangeType::Update\n }\n }\n}\n\npub async fn apply_component_changes(\n changes: Vec<&SetupStatus>,\n context: &D::Context,\n) -> Result<()> {\n // First delete components that need to be removed\n for change in changes.iter() {\n for key in &change.keys_to_delete {\n change.desc.delete(key, context).await?;\n }\n }\n\n // Then upsert components that need to be updated\n for change in changes.iter() {\n for state in &change.states_to_upsert {\n if state.already_exists {\n change.desc.update(&state.state, context).await?;\n } else {\n change.desc.create(&state.state, context).await?;\n }\n }\n }\n\n Ok(())\n}\n\nimpl ResourceSetupStatus for (A, B) {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n result.extend(self.0.describe_changes());\n result.extend(self.1.describe_changes());\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.0.change_type(), self.1.change_type()) {\n (SetupChangeType::Invalid, _) | (_, SetupChangeType::Invalid) => {\n SetupChangeType::Invalid\n }\n (SetupChangeType::NoChange, b) => b,\n (a, _) => a,\n }\n }\n}\n"], ["/cocoindex/src/llm/openai.rs", "use crate::api_bail;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient, detect_image_mime_type};\nuse anyhow::Result;\nuse async_openai::{\n Client as OpenAIClient,\n config::OpenAIConfig,\n types::{\n ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartImage,\n ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage,\n ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,\n ChatCompletionRequestUserMessageContent, 
ChatCompletionRequestUserMessageContentPart,\n CreateChatCompletionRequest, CreateEmbeddingRequest, EmbeddingInput, ImageDetail,\n ResponseFormat, ResponseFormatJsonSchema,\n },\n};\nuse async_trait::async_trait;\nuse base64::prelude::*;\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! {\n \"text-embedding-3-small\" => 1536,\n \"text-embedding-3-large\" => 3072,\n \"text-embedding-ada-002\" => 1536,\n};\n\npub struct Client {\n client: async_openai::Client,\n}\n\nimpl Client {\n pub(crate) fn from_parts(client: async_openai::Client) -> Self {\n Self { client }\n }\n\n pub fn new(address: Option) -> Result {\n if let Some(address) = address {\n api_bail!(\"OpenAI doesn't support custom API address: {address}\");\n }\n // Verify API key is set\n if std::env::var(\"OPENAI_API_KEY\").is_err() {\n api_bail!(\"OPENAI_API_KEY environment variable must be set\");\n }\n Ok(Self {\n // OpenAI client will use OPENAI_API_KEY env variable by default\n client: OpenAIClient::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let mut messages = Vec::new();\n\n // Add system prompt if provided\n if let Some(system) = request.system_prompt {\n messages.push(ChatCompletionRequestMessage::System(\n ChatCompletionRequestSystemMessage {\n content: ChatCompletionRequestSystemMessageContent::Text(system.into_owned()),\n ..Default::default()\n },\n ));\n }\n\n // Add user message\n let user_message_content = match request.image {\n Some(img_bytes) => {\n let base64_image = BASE64_STANDARD.encode(img_bytes.as_ref());\n let mime_type = detect_image_mime_type(img_bytes.as_ref())?;\n let image_url = format!(\"data:{mime_type};base64,{base64_image}\");\n ChatCompletionRequestUserMessageContent::Array(vec![\n ChatCompletionRequestUserMessageContentPart::Text(\n ChatCompletionRequestMessageContentPartText {\n text: 
request.user_prompt.into_owned(),\n },\n ),\n ChatCompletionRequestUserMessageContentPart::ImageUrl(\n ChatCompletionRequestMessageContentPartImage {\n image_url: async_openai::types::ImageUrl {\n url: image_url,\n detail: Some(ImageDetail::Auto),\n },\n },\n ),\n ])\n }\n None => ChatCompletionRequestUserMessageContent::Text(request.user_prompt.into_owned()),\n };\n messages.push(ChatCompletionRequestMessage::User(\n ChatCompletionRequestUserMessage {\n content: user_message_content,\n ..Default::default()\n },\n ));\n\n // Create the chat completion request\n let request = CreateChatCompletionRequest {\n model: request.model.to_string(),\n messages,\n response_format: match request.output_format {\n Some(super::OutputFormat::JsonSchema { name, schema }) => {\n Some(ResponseFormat::JsonSchema {\n json_schema: ResponseFormatJsonSchema {\n name: name.into_owned(),\n description: None,\n schema: Some(serde_json::to_value(&schema)?),\n strict: Some(true),\n },\n })\n }\n None => None,\n },\n ..Default::default()\n };\n\n // Send request and get response\n let response = self.client.chat().create(request).await?;\n\n // Extract the response text from the first choice\n let text = response\n .choices\n .into_iter()\n .next()\n .and_then(|choice| choice.message.content)\n .ok_or_else(|| anyhow::anyhow!(\"No response from OpenAI\"))?;\n\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: true,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let response = self\n .client\n .embeddings()\n .create(CreateEmbeddingRequest {\n model: request.model.to_string(),\n input: EmbeddingInput::String(request.text.to_string()),\n dimensions: 
request.output_dimension,\n ..Default::default()\n })\n .await?;\n Ok(super::LlmEmbeddingResponse {\n embedding: response\n .data\n .into_iter()\n .next()\n .ok_or_else(|| anyhow::anyhow!(\"No embedding returned from OpenAI\"))?\n .embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/ops/functions/parse_json.rs", "use crate::ops::sdk::*;\nuse anyhow::Result;\nuse std::collections::HashMap;\nuse std::sync::{Arc, LazyLock};\nuse unicase::UniCase;\n\npub struct Args {\n text: ResolvedOpArg,\n language: Option,\n}\n\ntype ParseFn = fn(&str) -> Result;\nstruct LanguageConfig {\n parse_fn: ParseFn,\n}\n\nfn add_language(\n output: &mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n parse_fn: ParseFn,\n) {\n let lang_config = Arc::new(LanguageConfig { parse_fn });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), lang_config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nfn parse_json(text: &str) -> Result {\n Ok(serde_json::from_str(text)?)\n}\n\nstatic PARSE_FN_BY_LANG: LazyLock, Arc>> =\n LazyLock::new(|| {\n let mut map = HashMap::new();\n add_language(&mut map, \"json\", [\".json\"], parse_json);\n map\n });\n\nstruct Executor {\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let lang_config = {\n let language = self.args.language.value(&input)?;\n language\n .optional()\n .map(|v| anyhow::Ok(v.as_str()?.as_ref()))\n .transpose()?\n .and_then(|lang| PARSE_FN_BY_LANG.get(&UniCase::new(lang)))\n };\n let parse_fn = lang_config.map(|c| c.parse_fn).unwrap_or(parse_json);\n let parsed_value = parse_fn(text)?;\n Ok(value::Value::Basic(value::BasicValue::Json(Arc::new(\n parsed_value,\n ))))\n }\n}\n\npub struct 
Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = EmptySpec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ParseJson\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a EmptySpec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let output_schema = make_output_type(BasicValueType::Json);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n _spec: EmptySpec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { args }))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n use serde_json::json;\n\n #[tokio::test]\n async fn test_parse_json() {\n let spec = EmptySpec {};\n\n let factory = Arc::new(Factory);\n let json_string_content = r#\"{\"city\": \"Magdeburg\"}\"#;\n let lang_value: Value = \"json\".to_string().into();\n\n let input_args_values = vec![json_string_content.to_string().into(), lang_value.clone()];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Json(arc_json_value)) => {\n let expected_json = json!({\"city\": \"Magdeburg\"});\n assert_eq!(\n *arc_json_value, expected_json,\n \"Parsed JSON value mismatch with specified language\"\n );\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Json), got 
{value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/service/error.rs", "use crate::prelude::*;\n\nuse axum::{\n Json,\n http::StatusCode,\n response::{IntoResponse, Response},\n};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse std::{\n error::Error,\n fmt::{Debug, Display},\n};\n\n#[derive(Debug)]\npub struct ApiError {\n pub err: anyhow::Error,\n pub status_code: StatusCode,\n}\n\nimpl ApiError {\n pub fn new(message: &str, status_code: StatusCode) -> Self {\n Self {\n err: anyhow!(\"{}\", message),\n status_code,\n }\n }\n}\n\nimpl Display for ApiError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl Error for ApiError {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.err.source()\n }\n}\n\n#[derive(Serialize)]\nstruct ErrorResponse {\n error: String,\n}\n\nimpl IntoResponse for ApiError {\n fn into_response(self) -> Response {\n debug!(\"Internal server error:\\n{:?}\", self.err);\n let error_response = ErrorResponse {\n error: self.err.to_string(),\n };\n (self.status_code, Json(error_response)).into_response()\n }\n}\n\nimpl From for ApiError {\n fn from(err: anyhow::Error) -> ApiError {\n if err.is::() {\n return err.downcast::().unwrap();\n }\n Self {\n err,\n status_code: StatusCode::INTERNAL_SERVER_ERROR,\n }\n }\n}\n\nimpl From for PyErr {\n fn from(val: ApiError) -> Self {\n PyException::new_err(val.err.to_string())\n }\n}\n\n#[derive(Clone)]\npub struct SharedError {\n pub err: Arc,\n}\n\nimpl SharedError {\n pub fn new(err: anyhow::Error) -> Self {\n Self { err: Arc::new(err) }\n }\n}\nimpl Debug for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.err, f)\n }\n}\n\nimpl Display for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl From for SharedError {\n fn from(err: E) -> Self {\n Self {\n err: Arc::new(anyhow::Error::from(err)),\n }\n 
}\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + Send + Sync + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\npub fn shared_ok(value: T) -> Result {\n Ok(value)\n}\n\npub type SharedResult = Result;\n\npub struct SharedErrorWrapper(SharedError);\n\nimpl Display for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Debug for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Error for SharedErrorWrapper {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.0.err.as_ref().source()\n }\n}\n\npub trait SharedResultExt {\n fn std_result(self) -> Result;\n}\n\nimpl SharedResultExt for Result {\n fn std_result(self) -> Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err)),\n }\n }\n}\n\npub trait SharedResultExtRef<'a, T> {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper>;\n}\n\nimpl<'a, T> SharedResultExtRef<'a, T> for &'a Result {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper> {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err.clone())),\n }\n }\n}\n\npub fn invariance_violation() -> anyhow::Error {\n anyhow::anyhow!(\"Invariance violation\")\n}\n\n#[macro_export]\nmacro_rules! api_bail {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n return Err($crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST).into())\n };\n}\n\n#[macro_export]\nmacro_rules! api_error {\n ( $fmt:literal $(, $($arg:tt)*)?) 
=> {\n $crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST)\n };\n}\n"], ["/cocoindex/src/ops/targets/shared/table_columns.rs", "use crate::{\n ops::sdk::SetupStateCompatibility,\n prelude::*,\n setup::{CombinedState, SetupChangeType},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TableColumnsSchema {\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"key_fields_schema\")]\n pub key_columns: IndexMap,\n\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"value_fields_schema\")]\n pub value_columns: IndexMap,\n}\n\n#[derive(Debug)]\npub enum TableUpsertionAction {\n Create {\n keys: IndexMap,\n values: IndexMap,\n },\n Update {\n columns_to_delete: IndexSet,\n columns_to_upsert: IndexMap,\n },\n}\n\nimpl TableUpsertionAction {\n pub fn is_empty(&self) -> bool {\n match self {\n Self::Create { .. } => false,\n Self::Update {\n columns_to_delete,\n columns_to_upsert,\n } => columns_to_delete.is_empty() && columns_to_upsert.is_empty(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct TableMainSetupAction {\n pub drop_existing: bool,\n pub table_upsertion: Option>,\n}\n\nimpl TableMainSetupAction {\n pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n\n let table_upsertion = if existing.always_exists() 
&& !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n .flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n .collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n\n Self {\n drop_existing,\n table_upsertion: Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }\n\n pub fn describe_changes(&self) -> Vec\n where\n T: std::fmt::Display,\n {\n let mut descriptions = vec![];\n if self.drop_existing {\n descriptions.push(setup::ChangeDescription::Action(\"Drop table\".to_string()));\n }\n if let Some(table_upsertion) = &self.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create table:\\n key columns: {}\\n value columns: {}\\n\",\n keys.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n values.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n )));\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n if !columns_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete column from table: {}\",\n columns_to_delete.iter().join(\", \"),\n )));\n }\n if !columns_to_upsert.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Add / update columns in table: {}\",\n columns_to_upsert\n .iter()\n .map(|(k, v)| format!(\"{k} {v}\"))\n .join(\", \"),\n )));\n }\n }\n }\n }\n descriptions\n }\n\n pub fn change_type(&self, has_other_update: bool) -> 
SetupChangeType {\n match (self.drop_existing, &self.table_upsertion) {\n (_, Some(TableUpsertionAction::Create { .. })) => SetupChangeType::Create,\n (_, Some(TableUpsertionAction::Update { .. })) => SetupChangeType::Update,\n (true, None) => SetupChangeType::Delete,\n (false, None) => {\n if has_other_update {\n SetupChangeType::Update\n } else {\n SetupChangeType::NoChange\n }\n }\n }\n }\n}\n\npub fn check_table_compatibility(\n desired: &TableColumnsSchema,\n existing: &TableColumnsSchema,\n) -> SetupStateCompatibility {\n let is_key_identical = existing.key_columns == desired.key_columns;\n if is_key_identical {\n let is_value_lossy = existing\n .value_columns\n .iter()\n .any(|(k, v)| desired.value_columns.get(k) != Some(v));\n if is_value_lossy {\n SetupStateCompatibility::PartialCompatible\n } else {\n SetupStateCompatibility::Compatible\n }\n } else {\n SetupStateCompatibility::NotCompatible\n }\n}\n"], ["/cocoindex/src/ops/sdk.rs", "pub(crate) use crate::prelude::*;\n\nuse crate::builder::plan::AnalyzedFieldReference;\nuse crate::builder::plan::AnalyzedLocalFieldReference;\nuse std::collections::BTreeMap;\n\npub use super::factory_bases::*;\npub use super::interface::*;\npub use crate::base::schema::*;\npub use crate::base::spec::*;\npub use crate::base::value::*;\n\n// Disambiguate the ExportTargetBuildOutput type.\npub use super::factory_bases::TypedExportDataCollectionBuildOutput;\npub use super::registry::ExecutorFactoryRegistry;\n/// Defined for all types convertible to ValueType, to ease creation for ValueType in various operation factories.\npub trait TypeCore {\n fn into_type(self) -> ValueType;\n}\n\nimpl TypeCore for BasicValueType {\n fn into_type(self) -> ValueType {\n ValueType::Basic(self)\n }\n}\n\nimpl TypeCore for StructSchema {\n fn into_type(self) -> ValueType {\n ValueType::Struct(self)\n }\n}\n\nimpl TypeCore for TableSchema {\n fn into_type(self) -> ValueType {\n ValueType::Table(self)\n }\n}\n\npub fn make_output_type(value_type: 
Type) -> EnrichedValueType {\n EnrichedValueType {\n typ: value_type.into_type(),\n attrs: Default::default(),\n nullable: false,\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct EmptySpec {}\n\n#[macro_export]\nmacro_rules! fields_value {\n ($($field:expr), +) => {\n $crate::base::value::FieldValues { fields: std::vec![ $(($field).into()),+ ] }\n };\n}\n\npub struct SchemaBuilderFieldRef(AnalyzedLocalFieldReference);\n\nimpl SchemaBuilderFieldRef {\n pub fn to_field_ref(&self) -> AnalyzedFieldReference {\n AnalyzedFieldReference {\n local: self.0.clone(),\n scope_up_level: 0,\n }\n }\n}\npub struct StructSchemaBuilder<'a> {\n base_fields_idx: Vec,\n target: &'a mut StructSchema,\n}\n\nimpl<'a> StructSchemaBuilder<'a> {\n pub fn new(target: &'a mut StructSchema) -> Self {\n Self {\n base_fields_idx: Vec::new(),\n target,\n }\n }\n\n pub fn _set_description(&mut self, description: impl Into>) {\n self.target.description = Some(description.into());\n }\n\n pub fn add_field(&mut self, field_schema: FieldSchema) -> SchemaBuilderFieldRef {\n let current_idx = self.target.fields.len() as u32;\n Arc::make_mut(&mut self.target.fields).push(field_schema);\n let mut fields_idx = self.base_fields_idx.clone();\n fields_idx.push(current_idx);\n SchemaBuilderFieldRef(AnalyzedLocalFieldReference { fields_idx })\n }\n\n pub fn _add_struct_field(\n &mut self,\n name: impl Into,\n nullable: bool,\n attrs: Arc>,\n ) -> (StructSchemaBuilder<'_>, SchemaBuilderFieldRef) {\n let field_schema = FieldSchema::new(\n name.into(),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(Vec::new()),\n description: None,\n }),\n nullable,\n attrs,\n },\n );\n let local_ref = self.add_field(field_schema);\n let struct_schema = match &mut Arc::make_mut(&mut self.target.fields)\n .last_mut()\n .unwrap()\n .value_type\n .typ\n {\n ValueType::Struct(s) => s,\n _ => unreachable!(),\n };\n (\n StructSchemaBuilder {\n base_fields_idx: local_ref.0.fields_idx.clone(),\n 
target: struct_schema,\n },\n local_ref,\n )\n }\n}\n"], ["/cocoindex/src/llm/ollama.rs", "use crate::prelude::*;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient};\nuse schemars::schema::SchemaObject;\nuse serde_with::{base64::Base64, serde_as};\n\nfn get_embedding_dimension(model: &str) -> Option {\n match model.to_ascii_lowercase().as_str() {\n \"mxbai-embed-large\"\n | \"bge-m3\"\n | \"bge-large\"\n | \"snowflake-arctic-embed\"\n | \"snowflake-arctic-embed2\" => Some(1024),\n\n \"nomic-embed-text\"\n | \"paraphrase-multilingual\"\n | \"snowflake-arctic-embed:110m\"\n | \"snowflake-arctic-embed:137m\"\n | \"granite-embedding:278m\" => Some(768),\n\n \"all-minilm\"\n | \"snowflake-arctic-embed:22m\"\n | \"snowflake-arctic-embed:33m\"\n | \"granite-embedding\" => Some(384),\n\n _ => None,\n }\n}\n\npub struct Client {\n generate_url: String,\n embed_url: String,\n reqwest_client: reqwest::Client,\n}\n\n#[derive(Debug, Serialize)]\nenum OllamaFormat<'a> {\n #[serde(untagged)]\n JsonSchema(&'a SchemaObject),\n}\n\n#[serde_as]\n#[derive(Debug, Serialize)]\nstruct OllamaRequest<'a> {\n pub model: &'a str,\n pub prompt: &'a str,\n #[serde_as(as = \"Option>\")]\n pub images: Option>,\n pub format: Option>,\n pub system: Option<&'a str>,\n pub stream: Option,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaResponse {\n pub response: String,\n}\n\n#[derive(Debug, Serialize)]\nstruct OllamaEmbeddingRequest<'a> {\n pub model: &'a str,\n pub input: &'a str,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaEmbeddingResponse {\n pub embedding: Vec,\n}\n\nconst OLLAMA_DEFAULT_ADDRESS: &str = \"http://localhost:11434\";\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n let address = match &address {\n Some(addr) => addr.trim_end_matches('/'),\n None => OLLAMA_DEFAULT_ADDRESS,\n };\n Ok(Self {\n generate_url: format!(\"{address}/api/generate\"),\n embed_url: format!(\"{address}/api/embed\"),\n reqwest_client: reqwest::Client::new(),\n })\n 
}\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let req = OllamaRequest {\n model: request.model,\n prompt: request.user_prompt.as_ref(),\n images: request.image.as_deref().map(|img| vec![img]),\n format: request.output_format.as_ref().map(\n |super::OutputFormat::JsonSchema { schema, .. }| {\n OllamaFormat::JsonSchema(schema.as_ref())\n },\n ),\n system: request.system_prompt.as_ref().map(|s| s.as_ref()),\n stream: Some(false),\n };\n let res = retryable::run(\n || {\n self.reqwest_client\n .post(self.generate_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !res.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n res.status(),\n res.text().await?\n );\n }\n let json: OllamaResponse = res.json().await?;\n Ok(super::LlmGenerateResponse {\n text: json.response,\n })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: true,\n extract_descriptions: true,\n top_level_must_be_object: false,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let req = OllamaEmbeddingRequest {\n model: request.model,\n input: request.text.as_ref(),\n };\n let resp = retryable::run(\n || {\n self.reqwest_client\n .post(self.embed_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: OllamaEmbeddingResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n 
get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/utils/yaml_ser.rs", "use base64::prelude::*;\nuse serde::ser::{self, Serialize};\nuse yaml_rust2::yaml::Yaml;\n\n#[derive(Debug)]\npub struct YamlSerializerError {\n msg: String,\n}\n\nimpl std::fmt::Display for YamlSerializerError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"YamlSerializerError: {}\", self.msg)\n }\n}\n\nimpl std::error::Error for YamlSerializerError {}\n\nimpl ser::Error for YamlSerializerError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n YamlSerializerError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\npub struct YamlSerializer;\n\nimpl YamlSerializer {\n pub fn serialize(value: &T) -> Result\n where\n T: Serialize,\n {\n value.serialize(YamlSerializer)\n }\n}\n\nimpl ser::Serializer for YamlSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n type SerializeSeq = SeqSerializer;\n type SerializeTuple = SeqSerializer;\n type SerializeTupleStruct = SeqSerializer;\n type SerializeTupleVariant = VariantSeqSerializer;\n type SerializeMap = MapSerializer;\n type SerializeStruct = MapSerializer;\n type SerializeStructVariant = VariantMapSerializer;\n\n fn serialize_bool(self, v: bool) -> Result {\n Ok(Yaml::Boolean(v))\n }\n\n fn serialize_i8(self, v: i8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i16(self, v: i16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i32(self, v: i32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i64(self, v: i64) -> Result {\n Ok(Yaml::Integer(v))\n }\n\n fn serialize_u8(self, v: u8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u16(self, v: u16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u32(self, v: u32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u64(self, v: u64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f32(self, v: f32) -> Result {\n 
Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f64(self, v: f64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_char(self, v: char) -> Result {\n Ok(Yaml::String(v.to_string()))\n }\n\n fn serialize_str(self, v: &str) -> Result {\n Ok(Yaml::String(v.to_owned()))\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result {\n let encoded = BASE64_STANDARD.encode(v);\n Ok(Yaml::String(encoded))\n }\n\n fn serialize_none(self) -> Result {\n Ok(Yaml::Null)\n }\n\n fn serialize_some(self, value: &T) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_struct(self, _name: &'static str) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result {\n Ok(Yaml::String(variant.to_owned()))\n }\n\n fn serialize_newtype_struct(\n self,\n _name: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n let mut hash = yaml_rust2::yaml::Hash::new();\n hash.insert(Yaml::String(variant.to_owned()), value.serialize(self)?);\n Ok(Yaml::Hash(hash))\n }\n\n fn serialize_seq(self, len: Option) -> Result {\n Ok(SeqSerializer {\n vec: Vec::with_capacity(len.unwrap_or(0)),\n })\n }\n\n fn serialize_tuple(self, len: usize) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n len: usize,\n ) -> Result {\n Ok(VariantSeqSerializer {\n variant_name: variant.to_owned(),\n vec: 
Vec::with_capacity(len),\n })\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n Ok(MapSerializer {\n map: yaml_rust2::yaml::Hash::new(),\n next_key: None,\n })\n }\n\n fn serialize_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_map(Some(len))\n }\n\n fn serialize_struct_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n Ok(VariantMapSerializer {\n variant_name: variant.to_owned(),\n map: yaml_rust2::yaml::Hash::new(),\n })\n }\n}\n\npub struct SeqSerializer {\n vec: Vec,\n}\n\nimpl ser::SerializeSeq for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Array(self.vec))\n }\n}\n\nimpl ser::SerializeTuple for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\nimpl ser::SerializeTupleStruct for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\npub struct MapSerializer {\n map: yaml_rust2::yaml::Hash,\n next_key: Option,\n}\n\nimpl ser::SerializeMap for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.next_key = Some(key.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn serialize_value(&mut 
self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n let key = self.next_key.take().unwrap();\n self.map.insert(key, value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Hash(self.map))\n }\n}\n\nimpl ser::SerializeStruct for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeMap::serialize_entry(self, key, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeMap::end(self)\n }\n}\n\npub struct VariantMapSerializer {\n variant_name: String,\n map: yaml_rust2::yaml::Hash,\n}\n\nimpl ser::SerializeStructVariant for VariantMapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.map.insert(\n Yaml::String(key.to_owned()),\n value.serialize(YamlSerializer)?,\n );\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut outer_map = yaml_rust2::yaml::Hash::new();\n outer_map.insert(Yaml::String(self.variant_name), Yaml::Hash(self.map));\n Ok(Yaml::Hash(outer_map))\n }\n}\n\npub struct VariantSeqSerializer {\n variant_name: String,\n vec: Vec,\n}\n\nimpl ser::SerializeTupleVariant for VariantSeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut map = yaml_rust2::yaml::Hash::new();\n map.insert(Yaml::String(self.variant_name), Yaml::Array(self.vec));\n Ok(Yaml::Hash(map))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use serde::ser::Error as SerdeSerError;\n use serde::{Serialize, Serializer};\n use std::collections::BTreeMap;\n use yaml_rust2::yaml::{Hash, Yaml};\n\n fn 
assert_yaml_serialization(value: T, expected_yaml: Yaml) {\n let result = YamlSerializer::serialize(&value);\n println!(\"Serialized value: {result:?}, Expected value: {expected_yaml:?}\");\n\n assert!(\n result.is_ok(),\n \"Serialization failed when it should have succeeded. Error: {:?}\",\n result.err()\n );\n assert_eq!(\n result.unwrap(),\n expected_yaml,\n \"Serialized YAML did not match expected YAML.\"\n );\n }\n\n #[test]\n fn test_serialize_bool() {\n assert_yaml_serialization(true, Yaml::Boolean(true));\n assert_yaml_serialization(false, Yaml::Boolean(false));\n }\n\n #[test]\n fn test_serialize_integers() {\n assert_yaml_serialization(42i8, Yaml::Integer(42));\n assert_yaml_serialization(-100i16, Yaml::Integer(-100));\n assert_yaml_serialization(123456i32, Yaml::Integer(123456));\n assert_yaml_serialization(7890123456789i64, Yaml::Integer(7890123456789));\n assert_yaml_serialization(255u8, Yaml::Integer(255));\n assert_yaml_serialization(65535u16, Yaml::Integer(65535));\n assert_yaml_serialization(4000000000u32, Yaml::Integer(4000000000));\n // u64 is serialized as Yaml::Real(String) in your implementation\n assert_yaml_serialization(\n 18446744073709551615u64,\n Yaml::Real(\"18446744073709551615\".to_string()),\n );\n }\n\n #[test]\n fn test_serialize_floats() {\n assert_yaml_serialization(3.14f32, Yaml::Real(\"3.14\".to_string()));\n assert_yaml_serialization(-0.001f64, Yaml::Real(\"-0.001\".to_string()));\n assert_yaml_serialization(1.0e10f64, Yaml::Real(\"10000000000\".to_string()));\n }\n\n #[test]\n fn test_serialize_char() {\n assert_yaml_serialization('X', Yaml::String(\"X\".to_string()));\n assert_yaml_serialization('✨', Yaml::String(\"✨\".to_string()));\n }\n\n #[test]\n fn test_serialize_str_and_string() {\n assert_yaml_serialization(\"hello YAML\", Yaml::String(\"hello YAML\".to_string()));\n assert_yaml_serialization(\"\".to_string(), Yaml::String(\"\".to_string()));\n }\n\n #[test]\n fn test_serialize_raw_bytes() {\n let bytes_slice: &[u8] 
= &[0x48, 0x65, 0x6c, 0x6c, 0x6f]; // \"Hello\"\n let expected = Yaml::Array(vec![\n Yaml::Integer(72),\n Yaml::Integer(101),\n Yaml::Integer(108),\n Yaml::Integer(108),\n Yaml::Integer(111),\n ]);\n assert_yaml_serialization(bytes_slice, expected.clone());\n\n let bytes_vec: Vec = bytes_slice.to_vec();\n assert_yaml_serialization(bytes_vec, expected);\n\n let empty_bytes_slice: &[u8] = &[];\n assert_yaml_serialization(empty_bytes_slice, Yaml::Array(vec![]));\n }\n\n struct MyBytesWrapper<'a>(&'a [u8]);\n\n impl<'a> Serialize for MyBytesWrapper<'a> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: Serializer,\n {\n serializer.serialize_bytes(self.0)\n }\n }\n\n #[test]\n fn test_custom_wrapper_serializes_bytes_as_base64_string() {\n let data: &[u8] = &[72, 101, 108, 108, 111]; // \"Hello\"\n let wrapped_data = MyBytesWrapper(data);\n\n let base64_encoded = BASE64_STANDARD.encode(data);\n let expected_yaml = Yaml::String(base64_encoded);\n\n assert_yaml_serialization(wrapped_data, expected_yaml);\n\n let empty_data: &[u8] = &[];\n let wrapped_empty_data = MyBytesWrapper(empty_data);\n let empty_base64_encoded = BASE64_STANDARD.encode(empty_data);\n let expected_empty_yaml = Yaml::String(empty_base64_encoded);\n assert_yaml_serialization(wrapped_empty_data, expected_empty_yaml);\n }\n\n #[test]\n fn test_serialize_option() {\n let val_none: Option = None;\n assert_yaml_serialization(val_none, Yaml::Null);\n\n let val_some: Option = Some(\"has value\".to_string());\n assert_yaml_serialization(val_some, Yaml::String(\"has value\".to_string()));\n }\n\n #[test]\n fn test_serialize_unit() {\n assert_yaml_serialization((), Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_unit_struct() {\n #[derive(Serialize)]\n struct MyUnitStruct;\n\n assert_yaml_serialization(MyUnitStruct, Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_newtype_struct() {\n #[derive(Serialize)]\n struct MyNewtypeStruct(u64);\n\n 
assert_yaml_serialization(MyNewtypeStruct(12345u64), Yaml::Real(\"12345\".to_string()));\n }\n\n #[test]\n fn test_serialize_seq() {\n let empty_vec: Vec = vec![];\n assert_yaml_serialization(empty_vec, Yaml::Array(vec![]));\n\n let simple_vec = vec![10, 20, 30];\n assert_yaml_serialization(\n simple_vec,\n Yaml::Array(vec![\n Yaml::Integer(10),\n Yaml::Integer(20),\n Yaml::Integer(30),\n ]),\n );\n\n let string_vec = vec![\"a\".to_string(), \"b\".to_string()];\n assert_yaml_serialization(\n string_vec,\n Yaml::Array(vec![\n Yaml::String(\"a\".to_string()),\n Yaml::String(\"b\".to_string()),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple() {\n let tuple_val = (42i32, \"text\", false);\n assert_yaml_serialization(\n tuple_val,\n Yaml::Array(vec![\n Yaml::Integer(42),\n Yaml::String(\"text\".to_string()),\n Yaml::Boolean(false),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple_struct() {\n #[derive(Serialize)]\n struct MyTupleStruct(String, i64);\n\n assert_yaml_serialization(\n MyTupleStruct(\"value\".to_string(), -500),\n Yaml::Array(vec![Yaml::String(\"value\".to_string()), Yaml::Integer(-500)]),\n );\n }\n\n #[test]\n fn test_serialize_map() {\n let mut map = BTreeMap::new(); // BTreeMap for ordered keys, matching yaml::Hash\n map.insert(\"key1\".to_string(), 100);\n map.insert(\"key2\".to_string(), 200);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"key1\".to_string()), Yaml::Integer(100));\n expected_hash.insert(Yaml::String(\"key2\".to_string()), Yaml::Integer(200));\n assert_yaml_serialization(map, Yaml::Hash(expected_hash));\n\n let empty_map: BTreeMap = BTreeMap::new();\n assert_yaml_serialization(empty_map, Yaml::Hash(Hash::new()));\n }\n\n #[derive(Serialize)]\n struct SimpleStruct {\n id: u32,\n name: String,\n is_active: bool,\n }\n\n #[test]\n fn test_serialize_struct() {\n let s = SimpleStruct {\n id: 101,\n name: \"A Struct\".to_string(),\n is_active: true,\n };\n let mut expected_hash = Hash::new();\n 
expected_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(101));\n expected_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"A Struct\".to_string()),\n );\n expected_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(true));\n assert_yaml_serialization(s, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n struct NestedStruct {\n description: String,\n data: SimpleStruct,\n tags: Vec,\n }\n\n #[test]\n fn test_serialize_nested_struct() {\n let ns = NestedStruct {\n description: \"Contains another struct and a vec\".to_string(),\n data: SimpleStruct {\n id: 202,\n name: \"Inner\".to_string(),\n is_active: false,\n },\n tags: vec![\"nested\".to_string(), \"complex\".to_string()],\n };\n\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(202));\n inner_struct_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"Inner\".to_string()),\n );\n inner_struct_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(false));\n\n let tags_array = Yaml::Array(vec![\n Yaml::String(\"nested\".to_string()),\n Yaml::String(\"complex\".to_string()),\n ]);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"description\".to_string()),\n Yaml::String(\"Contains another struct and a vec\".to_string()),\n );\n expected_hash.insert(\n Yaml::String(\"data\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n expected_hash.insert(Yaml::String(\"tags\".to_string()), tags_array);\n\n assert_yaml_serialization(ns, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n enum MyEnum {\n Unit,\n Newtype(i32),\n Tuple(String, bool),\n Struct { field_a: u16, field_b: char },\n }\n\n #[test]\n fn test_serialize_enum_unit_variant() {\n assert_yaml_serialization(MyEnum::Unit, Yaml::String(\"Unit\".to_string()));\n }\n\n #[test]\n fn test_serialize_enum_newtype_variant() {\n let mut expected_hash = Hash::new();\n 
expected_hash.insert(Yaml::String(\"Newtype\".to_string()), Yaml::Integer(999));\n assert_yaml_serialization(MyEnum::Newtype(999), Yaml::Hash(expected_hash));\n }\n\n #[test]\n fn test_serialize_enum_tuple_variant() {\n let mut expected_hash = Hash::new();\n let inner_array = Yaml::Array(vec![\n Yaml::String(\"tuple_data\".to_string()),\n Yaml::Boolean(true),\n ]);\n expected_hash.insert(Yaml::String(\"Tuple\".to_string()), inner_array);\n assert_yaml_serialization(\n MyEnum::Tuple(\"tuple_data\".to_string(), true),\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_serialize_enum_struct_variant() {\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"field_a\".to_string()), Yaml::Integer(123));\n inner_struct_hash.insert(\n Yaml::String(\"field_b\".to_string()),\n Yaml::String(\"Z\".to_string()),\n );\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"Struct\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n assert_yaml_serialization(\n MyEnum::Struct {\n field_a: 123,\n field_b: 'Z',\n },\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_display() {\n let error = YamlSerializerError {\n msg: \"A test error message\".to_string(),\n };\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: A test error message\"\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_custom() {\n let error = YamlSerializerError::custom(\"Custom error detail\");\n assert_eq!(error.msg, \"Custom error detail\");\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: Custom error detail\"\n );\n let _err_trait_obj: Box = Box::new(error);\n }\n}\n"], ["/cocoindex/src/builder/plan.rs", "use crate::prelude::*;\n\nuse crate::ops::interface::*;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalFieldReference {\n /// Must be non-empty.\n pub fields_idx: Vec,\n}\n\n#[derive(Debug, 
Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedFieldReference {\n pub local: AnalyzedLocalFieldReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalCollectorReference {\n pub collector_idx: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedCollectorReference {\n pub local: AnalyzedLocalCollectorReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct AnalyzedStructMapping {\n pub fields: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\")]\npub enum AnalyzedValueMapping {\n Constant { value: value::Value },\n Field(AnalyzedFieldReference),\n Struct(AnalyzedStructMapping),\n}\n\n#[derive(Debug, Clone)]\npub struct AnalyzedOpOutput {\n pub field_idx: u32,\n}\n\npub struct AnalyzedImportOp {\n pub name: String,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n pub primary_key_type: schema::ValueType,\n pub refresh_options: spec::SourceRefreshOptions,\n\n pub concurrency_controller: concur_control::CombinedConcurrencyController,\n}\n\npub struct AnalyzedFunctionExecInfo {\n pub enable_cache: bool,\n pub behavior_version: Option,\n\n /// Fingerprinter of the function's behavior.\n pub fingerprinter: Fingerprinter,\n /// To deserialize cached value.\n pub output_type: schema::ValueType,\n}\n\npub struct AnalyzedTransformOp {\n pub name: String,\n pub inputs: Vec,\n pub function_exec_info: AnalyzedFunctionExecInfo,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n}\n\npub struct AnalyzedForEachOp {\n pub name: String,\n pub local_field_ref: AnalyzedLocalFieldReference,\n pub op_scope: AnalyzedOpScope,\n pub concurrency_controller: 
concur_control::ConcurrencyController,\n}\n\npub struct AnalyzedCollectOp {\n pub name: String,\n pub has_auto_uuid_field: bool,\n pub input: AnalyzedStructMapping,\n pub collector_ref: AnalyzedCollectorReference,\n /// Fingerprinter of the collector's schema. Used to decide when to reuse auto-generated UUIDs.\n pub fingerprinter: Fingerprinter,\n}\n\npub enum AnalyzedPrimaryKeyDef {\n Fields(Vec),\n}\n\npub struct AnalyzedExportOp {\n pub name: String,\n pub input: AnalyzedLocalCollectorReference,\n pub export_target_factory: Arc,\n pub export_context: Arc,\n pub primary_key_def: AnalyzedPrimaryKeyDef,\n pub primary_key_type: schema::ValueType,\n /// idx for value fields - excluding the primary key field.\n pub value_fields: Vec,\n /// If true, value is never changed on the same primary key.\n /// This is guaranteed if the primary key contains auto-generated UUIDs.\n pub value_stable: bool,\n}\n\npub struct AnalyzedExportTargetOpGroup {\n pub target_factory: Arc,\n pub op_idx: Vec,\n}\n\npub enum AnalyzedReactiveOp {\n Transform(AnalyzedTransformOp),\n ForEach(AnalyzedForEachOp),\n Collect(AnalyzedCollectOp),\n}\n\npub struct AnalyzedOpScope {\n pub reactive_ops: Vec,\n pub collector_len: usize,\n}\n\npub struct ExecutionPlan {\n pub logic_fingerprint: Fingerprint,\n\n pub import_ops: Vec,\n pub op_scope: AnalyzedOpScope,\n pub export_ops: Vec,\n pub export_op_groups: Vec,\n}\n\npub struct TransientExecutionPlan {\n pub input_fields: Vec,\n pub op_scope: AnalyzedOpScope,\n pub output_value: AnalyzedValueMapping,\n}\n\nfn u32_is_zero(v: &u32) -> bool {\n *v == 0\n}\n"], ["/cocoindex/src/llm/voyage.rs", "use crate::prelude::*;\n\nuse crate::llm::{LlmEmbeddingClient, LlmEmbeddingRequest, LlmEmbeddingResponse};\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n // Current models\n \"voyage-3-large\" => 1024,\n \"voyage-3.5\" => 1024,\n \"voyage-3.5-lite\" => 1024,\n \"voyage-code-3\" => 1024,\n \"voyage-finance-2\" => 1024,\n \"voyage-law-2\" => 1024,\n \"voyage-code-2\" => 1536,\n\n // Legacy models\n \"voyage-3\" => 1024,\n \"voyage-3-lite\" => 512,\n \"voyage-multilingual-2\" => 1024,\n \"voyage-large-2-instruct\" => 1024,\n \"voyage-large-2\" => 1536,\n \"voyage-2\" => 1024,\n \"voyage-lite-02-instruct\" => 1024,\n \"voyage-02\" => 1024,\n \"voyage-01\" => 1024,\n \"voyage-lite-01\" => 1024,\n \"voyage-lite-01-instruct\" => 1024,\n};\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Voyage AI doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"VOYAGE_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"VOYAGE_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[derive(Deserialize)]\nstruct EmbeddingData {\n embedding: Vec,\n}\n\n#[derive(Deserialize)]\nstruct EmbedResponse {\n data: Vec,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = \"https://api.voyageai.com/v1/embeddings\";\n\n let mut payload = serde_json::json!({\n \"input\": request.text,\n \"model\": request.model,\n });\n\n if let Some(task_type) = request.task_type {\n payload[\"input_type\"] = serde_json::Value::String(task_type.into());\n }\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"Authorization\", format!(\"Bearer {}\", self.api_key))\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n\n if !resp.status().is_success() {\n bail!(\n \"Voyage AI API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n\n let embedding_resp: EmbedResponse 
= resp.json().await.context(\"Invalid JSON\")?;\n\n if embedding_resp.data.is_empty() {\n bail!(\"No embedding data in response\");\n }\n\n Ok(LlmEmbeddingResponse {\n embedding: embedding_resp.data[0].embedding.clone(),\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/utils/concur_control.rs", "use crate::prelude::*;\n\nuse tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore};\n\nstruct WeightedSemaphore {\n downscale_factor: u8,\n downscaled_quota: u32,\n sem: Arc,\n}\n\nimpl WeightedSemaphore {\n pub fn new(quota: usize) -> Self {\n let mut downscale_factor = 0;\n let mut downscaled_quota = quota;\n while downscaled_quota > u32::MAX as usize {\n downscaled_quota >>= 1;\n downscale_factor += 1;\n }\n let sem = Arc::new(Semaphore::new(downscaled_quota));\n Self {\n downscaled_quota: downscaled_quota as u32,\n downscale_factor,\n sem,\n }\n }\n\n async fn acquire_reservation(&self) -> Result {\n self.sem.clone().acquire_owned().await\n }\n\n async fn acquire(\n &self,\n weight: usize,\n reserved: bool,\n ) -> Result, AcquireError> {\n let downscaled_weight = (weight >> self.downscale_factor) as u32;\n let capped_weight = downscaled_weight.min(self.downscaled_quota);\n let reserved_weight = if reserved { 1 } else { 0 };\n if reserved_weight >= capped_weight {\n return Ok(None);\n }\n Ok(Some(\n self.sem\n .clone()\n .acquire_many_owned(capped_weight - reserved_weight)\n .await?,\n ))\n }\n}\n\npub struct Options {\n pub max_inflight_rows: Option,\n pub max_inflight_bytes: Option,\n}\n\npub struct ConcurrencyControllerPermit {\n _inflight_count_permit: Option,\n _inflight_bytes_permit: Option,\n}\n\npub struct ConcurrencyController {\n inflight_count_sem: Option>,\n inflight_bytes_sem: Option,\n}\n\npub static BYTES_UNKNOWN_YET: Option usize> = None;\n\nimpl ConcurrencyController {\n pub fn new(exec_options: &Options) -> Self {\n Self {\n 
inflight_count_sem: exec_options\n .max_inflight_rows\n .map(|max| Arc::new(Semaphore::new(max))),\n inflight_bytes_sem: exec_options.max_inflight_bytes.map(WeightedSemaphore::new),\n }\n }\n\n /// If `bytes_fn` is `None`, it means the number of bytes is not known yet.\n /// The controller will reserve a minimum number of bytes.\n /// The caller should call `acquire_bytes_with_reservation` with the actual number of bytes later.\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let inflight_count_permit = if let Some(sem) = &self.inflight_count_sem {\n Some(sem.clone().acquire_owned().await?)\n } else {\n None\n };\n let inflight_bytes_permit = if let Some(sem) = &self.inflight_bytes_sem {\n if let Some(bytes_fn) = bytes_fn {\n sem.acquire(bytes_fn(), false).await?\n } else {\n Some(sem.acquire_reservation().await?)\n }\n } else {\n None\n };\n Ok(ConcurrencyControllerPermit {\n _inflight_count_permit: inflight_count_permit,\n _inflight_bytes_permit: inflight_bytes_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result, AcquireError> {\n if let Some(sem) = &self.inflight_bytes_sem {\n sem.acquire(bytes_fn(), true).await\n } else {\n Ok(None)\n }\n }\n}\n\npub struct CombinedConcurrencyControllerPermit {\n _permit: ConcurrencyControllerPermit,\n _global_permit: ConcurrencyControllerPermit,\n}\n\npub struct CombinedConcurrencyController {\n controller: ConcurrencyController,\n global_controller: Arc,\n needs_num_bytes: bool,\n}\n\nimpl CombinedConcurrencyController {\n pub fn new(exec_options: &Options, global_controller: Arc) -> Self {\n Self {\n controller: ConcurrencyController::new(exec_options),\n needs_num_bytes: exec_options.max_inflight_bytes.is_some()\n || global_controller.inflight_bytes_sem.is_some(),\n global_controller,\n }\n }\n\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let num_bytes_fn = if let Some(bytes_fn) = bytes_fn\n 
&& self.needs_num_bytes\n {\n let num_bytes = bytes_fn();\n Some(move || num_bytes)\n } else {\n None\n };\n\n let permit = self.controller.acquire(num_bytes_fn).await?;\n let global_permit = self.global_controller.acquire(num_bytes_fn).await?;\n Ok(CombinedConcurrencyControllerPermit {\n _permit: permit,\n _global_permit: global_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result<(Option, Option), AcquireError> {\n let num_bytes = bytes_fn();\n let permit = self\n .controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n let global_permit = self\n .global_controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n Ok((permit, global_permit))\n }\n}\n"], ["/cocoindex/src/llm/mod.rs", "use crate::prelude::*;\n\nuse crate::base::json_schema::ToJsonSchemaOptions;\nuse infer::Infer;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\nstatic INFER: LazyLock = LazyLock::new(Infer::new);\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub enum LlmApiType {\n Ollama,\n OpenAi,\n Gemini,\n Anthropic,\n LiteLlm,\n OpenRouter,\n Voyage,\n Vllm,\n VertexAi,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct VertexAiConfig {\n pub project: String,\n pub region: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum LlmApiConfig {\n VertexAi(VertexAiConfig),\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LlmSpec {\n pub api_type: LlmApiType,\n pub address: Option,\n pub model: String,\n pub api_config: Option,\n}\n\n#[derive(Debug)]\npub enum OutputFormat<'a> {\n JsonSchema {\n name: Cow<'a, str>,\n schema: Cow<'a, SchemaObject>,\n },\n}\n\n#[derive(Debug)]\npub struct LlmGenerateRequest<'a> {\n pub model: &'a str,\n pub system_prompt: Option>,\n pub user_prompt: Cow<'a, str>,\n pub image: Option>,\n pub output_format: Option>,\n}\n\n#[derive(Debug)]\npub struct 
LlmGenerateResponse {\n pub text: String,\n}\n\n#[async_trait]\npub trait LlmGenerationClient: Send + Sync {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result;\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions;\n}\n\n#[derive(Debug)]\npub struct LlmEmbeddingRequest<'a> {\n pub model: &'a str,\n pub text: Cow<'a, str>,\n pub output_dimension: Option,\n pub task_type: Option>,\n}\n\npub struct LlmEmbeddingResponse {\n pub embedding: Vec,\n}\n\n#[async_trait]\npub trait LlmEmbeddingClient: Send + Sync {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result;\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option;\n}\n\nmod anthropic;\nmod gemini;\nmod litellm;\nmod ollama;\nmod openai;\nmod openrouter;\nmod vllm;\nmod voyage;\n\npub async fn new_llm_generation_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::Anthropic => {\n Box::new(anthropic::Client::new(address).await?) as Box\n }\n LlmApiType::LiteLlm => {\n Box::new(litellm::Client::new_litellm(address).await?) as Box\n }\n LlmApiType::OpenRouter => Box::new(openrouter::Client::new_openrouter(address).await?)\n as Box,\n LlmApiType::Voyage => {\n api_bail!(\"Voyage is not supported for generation\")\n }\n LlmApiType::Vllm => {\n Box::new(vllm::Client::new_vllm(address).await?) 
as Box\n }\n };\n Ok(client)\n}\n\npub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}\n\npub fn detect_image_mime_type(bytes: &[u8]) -> Result<&'static str> {\n let infer = &*INFER;\n match infer.get(bytes) {\n Some(info) if info.mime_type().starts_with(\"image/\") => Ok(info.mime_type()),\n _ => bail!(\"Unknown or unsupported image format\"),\n }\n}\n"], ["/cocoindex/src/utils/fingerprint.rs", "use anyhow::bail;\nuse base64::prelude::*;\nuse blake2::digest::typenum;\nuse blake2::{Blake2b, Digest};\nuse serde::Deserialize;\nuse serde::ser::{\n Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,\n SerializeTupleStruct, SerializeTupleVariant, Serializer,\n};\n\n#[derive(Debug)]\npub struct FingerprinterError {\n msg: String,\n}\n\nimpl std::fmt::Display for FingerprinterError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"FingerprinterError: {}\", self.msg)\n }\n}\nimpl std::error::Error for FingerprinterError {}\nimpl serde::ser::Error for FingerprinterError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n FingerprinterError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct Fingerprint(pub [u8; 
16]);\n\nimpl Fingerprint {\n pub fn to_base64(self) -> String {\n BASE64_STANDARD.encode(self.0)\n }\n\n pub fn from_base64(s: &str) -> anyhow::Result {\n let bytes = match s.len() {\n 24 => BASE64_STANDARD.decode(s)?,\n\n // For backward compatibility. Some old version (<= v0.1.2) is using hex encoding.\n 32 => hex::decode(s)?,\n _ => bail!(\"Encoded fingerprint length is unexpected: {}\", s.len()),\n };\n match bytes.try_into() {\n Ok(bytes) => Ok(Fingerprint(bytes)),\n Err(e) => bail!(\"Fingerprint bytes length is unexpected: {}\", e.len()),\n }\n }\n}\n\nimpl Serialize for Fingerprint {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n serializer.serialize_str(&self.to_base64())\n }\n}\n\nimpl<'de> Deserialize<'de> for Fingerprint {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s = String::deserialize(deserializer)?;\n Self::from_base64(&s).map_err(serde::de::Error::custom)\n }\n}\n#[derive(Clone, Default)]\npub struct Fingerprinter {\n hasher: Blake2b,\n}\n\nimpl Fingerprinter {\n pub fn into_fingerprint(self) -> Fingerprint {\n Fingerprint(self.hasher.finalize().into())\n }\n\n pub fn with(self, value: &S) -> Result {\n let mut fingerprinter = self;\n value.serialize(&mut fingerprinter)?;\n Ok(fingerprinter)\n }\n\n pub fn write(&mut self, value: &S) -> Result<(), FingerprinterError> {\n value.serialize(self)\n }\n\n fn write_type_tag(&mut self, tag: &str) {\n self.hasher.update(tag.as_bytes());\n self.hasher.update(b\";\");\n }\n\n fn write_end_tag(&mut self) {\n self.hasher.update(b\".\");\n }\n\n fn write_varlen_bytes(&mut self, bytes: &[u8]) {\n self.write_usize(bytes.len());\n self.hasher.update(bytes);\n }\n\n fn write_usize(&mut self, value: usize) {\n self.hasher.update((value as u32).to_le_bytes());\n }\n}\n\nimpl Serializer for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n type SerializeSeq = Self;\n type SerializeTuple = Self;\n type 
SerializeTupleStruct = Self;\n type SerializeTupleVariant = Self;\n type SerializeMap = Self;\n type SerializeStruct = Self;\n type SerializeStructVariant = Self;\n\n fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {\n self.write_type_tag(if v { \"t\" } else { \"f\" });\n Ok(())\n }\n\n fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {\n self.write_type_tag(\"i1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {\n self.write_type_tag(\"i2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {\n self.write_type_tag(\"i4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {\n self.write_type_tag(\"i8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {\n self.write_type_tag(\"u1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {\n self.write_type_tag(\"u2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {\n self.write_type_tag(\"u4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {\n self.write_type_tag(\"u8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {\n self.write_type_tag(\"f4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {\n self.write_type_tag(\"f8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_char(self, v: char) -> Result<(), Self::Error> {\n self.write_type_tag(\"c\");\n self.write_usize(v as usize);\n Ok(())\n }\n\n fn serialize_str(self, v: &str) -> Result<(), Self::Error> {\n self.write_type_tag(\"s\");\n 
self.write_varlen_bytes(v.as_bytes());\n Ok(())\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result<(), Self::Error> {\n self.write_type_tag(\"b\");\n self.write_varlen_bytes(v);\n Ok(())\n }\n\n fn serialize_none(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"\");\n Ok(())\n }\n\n fn serialize_some(self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"()\");\n Ok(())\n }\n\n fn serialize_unit_struct(self, name: &'static str) -> Result<(), Self::Error> {\n self.write_type_tag(\"US\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(())\n }\n\n fn serialize_unit_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result<(), Self::Error> {\n self.write_type_tag(\"UV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(())\n }\n\n fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NS\");\n self.write_varlen_bytes(name.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_seq(self, _len: Option) -> Result {\n self.write_type_tag(\"L\");\n Ok(self)\n }\n\n fn serialize_tuple(self, _len: usize) -> Result {\n self.write_type_tag(\"T\");\n Ok(self)\n }\n\n fn serialize_tuple_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TS\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_tuple_variant(\n self,\n name: &'static str,\n 
_variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n self.write_type_tag(\"M\");\n Ok(self)\n }\n\n fn serialize_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"S\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_struct_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"SV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n}\n\nimpl SerializeSeq for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTuple for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n 
value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeMap for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n key.serialize(&mut **self)\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStructVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/stats.rs", "use crate::prelude::*;\n\nuse std::{\n ops::AddAssign,\n sync::atomic::{AtomicI64, Ordering::Relaxed},\n};\n\n#[derive(Default, Serialize)]\npub struct Counter(pub AtomicI64);\n\nimpl Counter {\n pub fn inc(&self, by: i64) {\n self.0.fetch_add(by, Relaxed);\n }\n\n pub fn get(&self) -> i64 {\n self.0.load(Relaxed)\n }\n\n pub fn delta(&self, base: &Self) -> Counter {\n Counter(AtomicI64::new(self.get() - base.get()))\n }\n\n pub fn into_inner(self) -> i64 {\n self.0.into_inner()\n }\n\n pub fn merge(&self, delta: &Self) 
{\n self.0.fetch_add(delta.get(), Relaxed);\n }\n}\n\nimpl AddAssign for Counter {\n fn add_assign(&mut self, rhs: Self) {\n self.0.fetch_add(rhs.into_inner(), Relaxed);\n }\n}\n\nimpl Clone for Counter {\n fn clone(&self) -> Self {\n Self(AtomicI64::new(self.get()))\n }\n}\n\nimpl std::fmt::Display for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\nimpl std::fmt::Debug for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\n#[derive(Debug, Serialize, Default, Clone)]\npub struct UpdateStats {\n pub num_no_change: Counter,\n pub num_insertions: Counter,\n pub num_deletions: Counter,\n /// Number of source rows that were updated.\n pub num_updates: Counter,\n /// Number of source rows that were reprocessed because of logic change.\n pub num_reprocesses: Counter,\n pub num_errors: Counter,\n}\n\nimpl UpdateStats {\n pub fn delta(&self, base: &Self) -> Self {\n UpdateStats {\n num_no_change: self.num_no_change.delta(&base.num_no_change),\n num_insertions: self.num_insertions.delta(&base.num_insertions),\n num_deletions: self.num_deletions.delta(&base.num_deletions),\n num_updates: self.num_updates.delta(&base.num_updates),\n num_reprocesses: self.num_reprocesses.delta(&base.num_reprocesses),\n num_errors: self.num_errors.delta(&base.num_errors),\n }\n }\n\n pub fn merge(&self, delta: &Self) {\n self.num_no_change.merge(&delta.num_no_change);\n self.num_insertions.merge(&delta.num_insertions);\n self.num_deletions.merge(&delta.num_deletions);\n self.num_updates.merge(&delta.num_updates);\n self.num_reprocesses.merge(&delta.num_reprocesses);\n self.num_errors.merge(&delta.num_errors);\n }\n\n pub fn has_any_change(&self) -> bool {\n self.num_insertions.get() > 0\n || self.num_deletions.get() > 0\n || self.num_updates.get() > 0\n || self.num_reprocesses.get() > 0\n || self.num_errors.get() > 0\n }\n}\n\nimpl std::fmt::Display 
for UpdateStats {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let mut messages = Vec::new();\n let num_errors = self.num_errors.get();\n if num_errors > 0 {\n messages.push(format!(\"{num_errors} source rows FAILED\"));\n }\n\n let num_skipped = self.num_no_change.get();\n if num_skipped > 0 {\n messages.push(format!(\"{num_skipped} source rows NO CHANGE\"));\n }\n\n let num_insertions = self.num_insertions.get();\n let num_deletions = self.num_deletions.get();\n let num_updates = self.num_updates.get();\n let num_reprocesses = self.num_reprocesses.get();\n let num_source_rows = num_insertions + num_deletions + num_updates + num_reprocesses;\n if num_source_rows > 0 {\n messages.push(format!(\n \"{num_source_rows} source rows processed ({num_insertions} ADDED, {num_deletions} REMOVED, {num_updates} UPDATED, {num_reprocesses} REPROCESSED on flow change)\",\n ));\n }\n\n if !messages.is_empty() {\n write!(f, \"{}\", messages.join(\"; \"))?;\n } else {\n write!(f, \"No changes\")?;\n }\n\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceUpdateInfo {\n pub source_name: String,\n pub stats: UpdateStats,\n}\n\nimpl std::fmt::Display for SourceUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.source_name, self.stats)\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct IndexUpdateInfo {\n pub sources: Vec,\n}\n\nimpl std::fmt::Display for IndexUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n for source in self.sources.iter() {\n writeln!(f, \"{source}\")?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/utils/retryable.rs", "use log::trace;\nuse std::{future::Future, time::Duration};\n\npub trait IsRetryable {\n fn is_retryable(&self) -> bool;\n}\n\npub struct Error {\n error: anyhow::Error,\n is_retryable: bool,\n}\n\nimpl std::fmt::Display for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n 
std::fmt::Display::fmt(&self.error, f)\n }\n}\n\nimpl std::fmt::Debug for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Debug::fmt(&self.error, f)\n }\n}\n\nimpl IsRetryable for Error {\n fn is_retryable(&self) -> bool {\n self.is_retryable\n }\n}\n\nimpl IsRetryable for reqwest::Error {\n fn is_retryable(&self) -> bool {\n self.status() == Some(reqwest::StatusCode::TOO_MANY_REQUESTS)\n }\n}\n\nimpl Error {\n pub fn always_retryable(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: true,\n }\n }\n}\n\nimpl From for Error {\n fn from(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: false,\n }\n }\n}\n\nimpl From for anyhow::Error {\n fn from(val: Error) -> Self {\n val.error\n }\n}\n\nimpl From for Error {\n fn from(error: E) -> Self {\n Self {\n is_retryable: error.is_retryable(),\n error: anyhow::Error::new(error),\n }\n }\n}\n\npub type Result = std::result::Result;\n\n#[allow(non_snake_case)]\npub fn Ok(value: T) -> Result {\n Result::Ok(value)\n}\n\npub struct RetryOptions {\n pub max_retries: Option,\n pub initial_backoff: Duration,\n pub max_backoff: Duration,\n}\n\nimpl Default for RetryOptions {\n fn default() -> Self {\n Self {\n max_retries: Some(10),\n initial_backoff: Duration::from_millis(100),\n max_backoff: Duration::from_secs(10),\n }\n }\n}\n\npub static HEAVY_LOADED_OPTIONS: RetryOptions = RetryOptions {\n max_retries: Some(10),\n initial_backoff: Duration::from_secs(1),\n max_backoff: Duration::from_secs(60),\n};\n\npub async fn run<\n Ok,\n Err: std::fmt::Display + IsRetryable,\n Fut: Future>,\n F: Fn() -> Fut,\n>(\n f: F,\n options: &RetryOptions,\n) -> Result {\n let mut retries = 0;\n let mut backoff = options.initial_backoff;\n\n loop {\n match f().await {\n Result::Ok(result) => return Result::Ok(result),\n Result::Err(err) => {\n if !err.is_retryable()\n || options\n .max_retries\n .is_some_and(|max_retries| retries >= max_retries)\n {\n return 
Result::Err(err);\n }\n retries += 1;\n trace!(\n \"Will retry #{} in {}ms for error: {}\",\n retries,\n backoff.as_millis(),\n err\n );\n tokio::time::sleep(backoff).await;\n if backoff < options.max_backoff {\n backoff = std::cmp::min(\n Duration::from_micros(\n (backoff.as_micros() * rand::random_range(1618..=2000) / 1000) as u64,\n ),\n options.max_backoff,\n );\n }\n }\n }\n }\n}\n"], ["/cocoindex/src/setup/auth_registry.rs", "use std::collections::hash_map;\n\nuse crate::prelude::*;\n\npub struct AuthRegistry {\n entries: RwLock>,\n}\n\nimpl Default for AuthRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl AuthRegistry {\n pub fn new() -> Self {\n Self {\n entries: RwLock::new(HashMap::new()),\n }\n }\n\n pub fn add(&self, key: String, value: serde_json::Value) -> Result<()> {\n let mut entries = self.entries.write().unwrap();\n match entries.entry(key) {\n hash_map::Entry::Occupied(entry) => {\n api_bail!(\"Auth entry already exists: {}\", entry.key());\n }\n hash_map::Entry::Vacant(entry) => {\n entry.insert(value);\n }\n }\n Ok(())\n }\n\n pub fn get(&self, entry_ref: &spec::AuthEntryReference) -> Result {\n let entries = self.entries.read().unwrap();\n match entries.get(&entry_ref.key) {\n Some(value) => Ok(serde_json::from_value(value.clone())?),\n None => api_bail!(\n \"Auth entry `{key}` not found.\\n\\\n Hint: If you're not referencing `{key}` in your flow, it will likely be caused by a previously persisted target using it. \\\n You need to bring back the definition for the auth entry `{key}`, so that CocoIndex will be able to do a cleanup in the next `setup` run. 
\\\n See https://cocoindex.io/docs/core/flow_def#auth-registry for more details.\",\n key = entry_ref.key\n ),\n }\n }\n}\n"], ["/cocoindex/src/base/duration.rs", "use std::f64;\n\nuse anyhow::{Result, anyhow, bail};\nuse chrono::Duration;\n\n/// Parses a string of number-unit pairs into a vector of (number, unit),\n/// ensuring units are among the allowed ones.\nfn parse_components(\n s: &str,\n allowed_units: &[char],\n original_input: &str,\n) -> Result> {\n let mut result = Vec::new();\n let mut iter = s.chars().peekable();\n while iter.peek().is_some() {\n let mut num_str = String::new();\n let mut has_decimal = false;\n\n // Parse digits and optional decimal point\n while let Some(&c) = iter.peek() {\n if c.is_ascii_digit() || (c == '.' && !has_decimal) {\n if c == '.' {\n has_decimal = true;\n }\n num_str.push(iter.next().unwrap());\n } else {\n break;\n }\n }\n if num_str.is_empty() {\n bail!(\"Expected number in: {}\", original_input);\n }\n let num = num_str\n .parse::()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", num_str, original_input))?;\n if let Some(&unit) = iter.peek() {\n if allowed_units.contains(&unit) {\n result.push((num, unit));\n iter.next();\n } else {\n bail!(\"Invalid unit '{}' in: {}\", unit, original_input);\n }\n } else {\n bail!(\n \"Missing unit after number '{}' in: {}\",\n num_str,\n original_input\n );\n }\n }\n Ok(result)\n}\n\n/// Parses an ISO 8601 duration string into a `chrono::Duration`.\nfn parse_iso8601_duration(s: &str, original_input: &str) -> Result {\n let (is_negative, s_after_sign) = if let Some(stripped) = s.strip_prefix('-') {\n (true, stripped)\n } else {\n (false, s)\n };\n\n if !s_after_sign.starts_with('P') {\n bail!(\"Duration must start with 'P' in: {}\", original_input);\n }\n let s_after_p = &s_after_sign[1..];\n\n let (date_part, time_part) = if let Some(pos) = s_after_p.find('T') {\n (&s_after_p[..pos], Some(&s_after_p[pos + 1..]))\n } else {\n (s_after_p, None)\n };\n\n // Date components (Y, 
M, W, D)\n let date_components = parse_components(date_part, &['Y', 'M', 'W', 'D'], original_input)?;\n\n // Time components (H, M, S)\n let time_components = if let Some(time_str) = time_part {\n let comps = parse_components(time_str, &['H', 'M', 'S'], original_input)?;\n if comps.is_empty() {\n bail!(\n \"Time part present but no time components in: {}\",\n original_input\n );\n }\n comps\n } else {\n vec![]\n };\n\n if date_components.is_empty() && time_components.is_empty() {\n bail!(\"No components in duration: {}\", original_input);\n }\n\n // Accumulate date duration\n let date_duration = date_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| {\n let days = match unit {\n 'Y' => num * 365.0,\n 'M' => num * 30.0,\n 'W' => num * 7.0,\n 'D' => num,\n _ => unreachable!(\"Invalid date unit should be caught by prior validation\"),\n };\n let microseconds = (days * 86_400_000_000.0) as i64;\n acc + Duration::microseconds(microseconds)\n });\n\n // Accumulate time duration\n let time_duration =\n time_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| match unit {\n 'H' => {\n let nanoseconds = (num * 3_600_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'M' => {\n let nanoseconds = (num * 60_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'S' => {\n let nanoseconds = (num.fract() * 1_000_000_000.0).round() as i64;\n acc + Duration::seconds(num as i64) + Duration::nanoseconds(nanoseconds)\n }\n _ => unreachable!(\"Invalid time unit should be caught by prior validation\"),\n });\n\n let mut total = date_duration + time_duration;\n if is_negative {\n total = -total;\n }\n\n Ok(total)\n}\n\n/// Parses a human-readable duration string into a `chrono::Duration`.\nfn parse_human_readable_duration(s: &str, original_input: &str) -> Result {\n let parts: Vec<&str> = s.split_whitespace().collect();\n if parts.is_empty() || parts.len() % 2 != 0 {\n bail!(\n \"Invalid human-readable 
duration format in: {}\",\n original_input\n );\n }\n\n let durations: Result> = parts\n .chunks(2)\n .map(|chunk| {\n let num: i64 = chunk[0]\n .parse()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", chunk[0], original_input))?;\n\n match chunk[1].to_lowercase().as_str() {\n \"day\" | \"days\" => Ok(Duration::days(num)),\n \"hour\" | \"hours\" => Ok(Duration::hours(num)),\n \"minute\" | \"minutes\" => Ok(Duration::minutes(num)),\n \"second\" | \"seconds\" => Ok(Duration::seconds(num)),\n \"millisecond\" | \"milliseconds\" => Ok(Duration::milliseconds(num)),\n \"microsecond\" | \"microseconds\" => Ok(Duration::microseconds(num)),\n _ => bail!(\"Invalid unit '{}' in: {}\", chunk[1], original_input),\n }\n })\n .collect();\n\n durations.map(|durs| durs.into_iter().sum())\n}\n\n/// Parses a duration string into a `chrono::Duration`, trying ISO 8601 first, then human-readable format.\npub fn parse_duration(s: &str) -> Result {\n let original_input = s;\n let s = s.trim();\n if s.is_empty() {\n bail!(\"Empty duration string\");\n }\n\n let is_likely_iso8601 = match s.as_bytes() {\n [c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n [b'-', c, ..] 
if c.eq_ignore_ascii_case(&b'P') => true,\n _ => false,\n };\n\n if is_likely_iso8601 {\n parse_iso8601_duration(s, original_input)\n } else {\n parse_human_readable_duration(s, original_input)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn check_ok(res: Result, expected: Duration, input_str: &str) {\n match res {\n Ok(duration) => assert_eq!(duration, expected, \"Input: '{input_str}'\"),\n Err(e) => panic!(\"Input: '{input_str}', expected Ok({expected:?}), but got Err: {e}\"),\n }\n }\n\n fn check_err_contains(res: Result, expected_substring: &str, input_str: &str) {\n match res {\n Ok(d) => panic!(\n \"Input: '{input_str}', expected error containing '{expected_substring}', but got Ok({d:?})\"\n ),\n Err(e) => {\n let err_msg = e.to_string();\n assert!(\n err_msg.contains(expected_substring),\n \"Input: '{input_str}', error message '{err_msg}' does not contain expected substring '{expected_substring}'\"\n );\n }\n }\n }\n\n #[test]\n fn test_empty_string() {\n check_err_contains(parse_duration(\"\"), \"Empty duration string\", \"\\\"\\\"\");\n }\n\n #[test]\n fn test_whitespace_string() {\n check_err_contains(parse_duration(\" \"), \"Empty duration string\", \"\\\" \\\"\");\n }\n\n #[test]\n fn test_iso_just_p() {\n check_err_contains(parse_duration(\"P\"), \"No components in duration: P\", \"\\\"P\\\"\");\n }\n\n #[test]\n fn test_iso_pt() {\n check_err_contains(\n parse_duration(\"PT\"),\n \"Time part present but no time components in: PT\",\n \"\\\"PT\\\"\",\n );\n }\n\n #[test]\n fn test_iso_missing_number_before_unit_in_date_part() {\n check_err_contains(parse_duration(\"PD\"), \"Expected number in: PD\", \"\\\"PD\\\"\");\n }\n #[test]\n fn test_iso_missing_number_before_unit_in_time_part() {\n check_err_contains(parse_duration(\"PTM\"), \"Expected number in: PTM\", \"\\\"PTM\\\"\");\n }\n\n #[test]\n fn test_iso_time_unit_without_t() {\n check_err_contains(parse_duration(\"P1H\"), \"Invalid unit 'H' in: P1H\", \"\\\"P1H\\\"\");\n 
check_err_contains(parse_duration(\"P1S\"), \"Invalid unit 'S' in: P1S\", \"\\\"P1S\\\"\");\n }\n\n #[test]\n fn test_iso_invalid_unit() {\n check_err_contains(parse_duration(\"P1X\"), \"Invalid unit 'X' in: P1X\", \"\\\"P1X\\\"\");\n check_err_contains(\n parse_duration(\"PT1X\"),\n \"Invalid unit 'X' in: PT1X\",\n \"\\\"PT1X\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_lowercase_unit_is_not_allowed() {\n check_err_contains(\n parse_duration(\"p1h\"),\n \"Duration must start with 'P' in: p1h\",\n \"\\\"p1h\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1h\"),\n \"Invalid unit 'h' in: PT1h\",\n \"\\\"PT1h\\\"\",\n );\n }\n\n #[test]\n fn test_iso_trailing_number_error() {\n check_err_contains(\n parse_duration(\"P1D2\"),\n \"Missing unit after number '2' in: P1D2\",\n \"\\\"P1D2\\\"\",\n );\n }\n\n #[test]\n fn test_iso_invalid_fractional_format() {\n check_err_contains(\n parse_duration(\"PT1..5S\"),\n \"Invalid unit '.' in: PT1..5S\",\n \"\\\"PT1..5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1.5.5S\"),\n \"Invalid unit '.' in: PT1.5.5S\",\n \"\\\"PT1.5.5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1..5D\"),\n \"Invalid unit '.' 
in: P1..5D\",\n \"\\\"P1..5D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_misplaced_t() {\n check_err_contains(\n parse_duration(\"P1DT2H T3M\"),\n \"Expected number in: P1DT2H T3M\",\n \"\\\"P1DT2H T3M\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1T2H\"),\n \"Missing unit after number '1' in: P1T2H\",\n \"\\\"P1T2H\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_number_after_p() {\n check_err_contains(\n parse_duration(\"P-1D\"),\n \"Expected number in: P-1D\",\n \"\\\"P-1D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_months() {\n check_ok(parse_duration(\"P1M\"), Duration::days(30), \"\\\"P1M\\\"\");\n check_ok(parse_duration(\" P13M\"), Duration::days(13 * 30), \"\\\"P13M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_weeks() {\n check_ok(parse_duration(\"P1W\"), Duration::days(7), \"\\\"P1W\\\"\");\n check_ok(parse_duration(\" P1W \"), Duration::days(7), \"\\\"P1W\\\"\");\n }\n\n #[test]\n fn test_iso_valid_days() {\n check_ok(parse_duration(\"P1D\"), Duration::days(1), \"\\\"P1D\\\"\");\n }\n\n #[test]\n fn test_iso_valid_hours() {\n check_ok(parse_duration(\"PT2H\"), Duration::hours(2), \"\\\"PT2H\\\"\");\n }\n\n #[test]\n fn test_iso_valid_minutes() {\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT3M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_seconds() {\n check_ok(parse_duration(\"PT4S\"), Duration::seconds(4), \"\\\"PT4S\\\"\");\n }\n\n #[test]\n fn test_iso_combined_units() {\n check_ok(\n parse_duration(\"P1Y2M3W4DT5H6M7S\"),\n Duration::days(365 + 60 + 3 * 7 + 4)\n + Duration::hours(5)\n + Duration::minutes(6)\n + Duration::seconds(7),\n \"\\\"P1Y2M3DT4H5M6S\\\"\",\n );\n check_ok(\n parse_duration(\"P1DT2H3M4S\"),\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4),\n \"\\\"P1DT2H3M4S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_duplicated_unit() {\n check_ok(parse_duration(\"P1D1D\"), Duration::days(2), \"\\\"P1D1D\\\"\");\n check_ok(parse_duration(\"PT1H1H\"), Duration::hours(2), 
\"\\\"PT1H1H\\\"\");\n }\n\n #[test]\n fn test_iso_out_of_order_unit() {\n check_ok(\n parse_duration(\"P1W1Y\"),\n Duration::days(365 + 7),\n \"\\\"P1W1Y\\\"\",\n );\n check_ok(\n parse_duration(\"PT2S1H\"),\n Duration::hours(1) + Duration::seconds(2),\n \"\\\"PT2S1H\\\"\",\n );\n check_ok(parse_duration(\"P3M\"), Duration::days(90), \"\\\"PT2S1H\\\"\");\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT2S1H\\\"\");\n check_err_contains(\n parse_duration(\"P1H2D\"),\n \"Invalid unit 'H' in: P1H2D\", // Time part without 'T' is invalid\n \"\\\"P1H2D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_duration_p1d() {\n check_ok(parse_duration(\"-P1D\"), -Duration::days(1), \"\\\"-P1D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pd0() {\n check_ok(parse_duration(\"P0D\"), Duration::zero(), \"\\\"P0D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0s() {\n check_ok(parse_duration(\"PT0S\"), Duration::zero(), \"\\\"PT0S\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0h0m0s() {\n check_ok(parse_duration(\"PT0H0M0S\"), Duration::zero(), \"\\\"PT0H0M0S\\\"\");\n }\n\n #[test]\n fn test_iso_fractional_seconds() {\n check_ok(\n parse_duration(\"PT1.5S\"),\n Duration::seconds(1) + Duration::milliseconds(500),\n \"\\\"PT1.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT441010.456123S\"),\n Duration::seconds(441010) + Duration::microseconds(456123),\n \"\\\"PT441010.456123S\\\"\",\n );\n check_ok(\n parse_duration(\"PT0.000001S\"),\n Duration::microseconds(1),\n \"\\\"PT0.000001S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_fractional_date_units() {\n check_ok(\n parse_duration(\"P1.5D\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"P1.25Y\"),\n Duration::microseconds((1.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"P1.25Y\\\"\",\n );\n check_ok(\n parse_duration(\"P2.75M\"),\n Duration::microseconds((2.75 * 30.0 * 86_400_000_000.0) as i64),\n \"\\\"P2.75M\\\"\",\n );\n 
check_ok(\n parse_duration(\"P0.5W\"),\n Duration::microseconds((0.5 * 7.0 * 86_400_000_000.0) as i64),\n \"\\\"P0.5W\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_fractional_date_units() {\n check_ok(\n parse_duration(\"-P1.5D\"),\n -Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"-P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"-P0.25Y\"),\n -Duration::microseconds((0.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"-P0.25Y\\\"\",\n );\n }\n\n #[test]\n fn test_iso_combined_fractional_units() {\n check_ok(\n parse_duration(\"P1.5DT2.5H3.5M4.5S\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64)\n + Duration::microseconds((2.5 * 3_600_000_000.0) as i64)\n + Duration::microseconds((3.5 * 60_000_000.0) as i64)\n + Duration::seconds(4)\n + Duration::milliseconds(500),\n \"\\\"1.5DT2.5H3.5M4.5S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_multiple_fractional_time_units() {\n check_ok(\n parse_duration(\"PT1.5S2.5S\"),\n Duration::seconds(1 + 2) + Duration::milliseconds(500) + Duration::milliseconds(500),\n \"\\\"PT1.5S2.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT1.1H2.2M3.3S\"),\n Duration::hours(1)\n + Duration::seconds((0.1 * 3600.0) as i64)\n + Duration::minutes(2)\n + Duration::seconds((0.2 * 60.0) as i64)\n + Duration::seconds(3)\n + Duration::milliseconds(300),\n \"\\\"PT1.1H2.2M3.3S\\\"\",\n );\n }\n\n // Human-readable Tests\n #[test]\n fn test_human_missing_unit() {\n check_err_contains(\n parse_duration(\"1\"),\n \"Invalid human-readable duration format in: 1\",\n \"\\\"1\\\"\",\n );\n }\n\n #[test]\n fn test_human_missing_number() {\n check_err_contains(\n parse_duration(\"day\"),\n \"Invalid human-readable duration format in: day\",\n \"\\\"day\\\"\",\n );\n }\n\n #[test]\n fn test_human_incomplete_pair() {\n check_err_contains(\n parse_duration(\"1 day 2\"),\n \"Invalid human-readable duration format in: 1 day 2\",\n \"\\\"1 day 2\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_number_at_start() {\n 
check_err_contains(\n parse_duration(\"one day\"),\n \"Invalid number 'one' in: one day\",\n \"\\\"one day\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_unit() {\n check_err_contains(\n parse_duration(\"1 hour 2 minutes 3 seconds four seconds\"),\n \"Invalid number 'four' in: 1 hour 2 minutes 3 seconds four seconds\",\n \"\\\"1 hour 2 minutes 3 seconds four seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_float_number_fail() {\n check_err_contains(\n parse_duration(\"1.5 hours\"),\n \"Invalid number '1.5' in: 1.5 hours\",\n \"\\\"1.5 hours\\\"\",\n );\n }\n\n #[test]\n fn test_invalid_human_readable_no_pairs() {\n check_err_contains(\n parse_duration(\"just some words\"),\n \"Invalid human-readable duration format in: just some words\",\n \"\\\"just some words\\\"\",\n );\n }\n\n #[test]\n fn test_human_unknown_unit() {\n check_err_contains(\n parse_duration(\"1 year\"),\n \"Invalid unit 'year' in: 1 year\",\n \"\\\"1 year\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_day() {\n check_ok(parse_duration(\"1 day\"), Duration::days(1), \"\\\"1 day\\\"\");\n }\n\n #[test]\n fn test_human_valid_days_uppercase() {\n check_ok(parse_duration(\"2 DAYS\"), Duration::days(2), \"\\\"2 DAYS\\\"\");\n }\n\n #[test]\n fn test_human_valid_hour() {\n check_ok(parse_duration(\"3 hour\"), Duration::hours(3), \"\\\"3 hour\\\"\");\n }\n\n #[test]\n fn test_human_valid_hours_mixedcase() {\n check_ok(parse_duration(\"4 HoUrS\"), Duration::hours(4), \"\\\"4 HoUrS\\\"\");\n }\n\n #[test]\n fn test_human_valid_minute() {\n check_ok(\n parse_duration(\"5 minute\"),\n Duration::minutes(5),\n \"\\\"5 minute\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_minutes() {\n check_ok(\n parse_duration(\"6 minutes\"),\n Duration::minutes(6),\n \"\\\"6 minutes\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_second() {\n check_ok(\n parse_duration(\"7 second\"),\n Duration::seconds(7),\n \"\\\"7 second\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_seconds() {\n check_ok(\n 
parse_duration(\"8 seconds\"),\n Duration::seconds(8),\n \"\\\"8 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_millisecond() {\n check_ok(\n parse_duration(\"9 millisecond\"),\n Duration::milliseconds(9),\n \"\\\"9 millisecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_milliseconds() {\n check_ok(\n parse_duration(\"10 milliseconds\"),\n Duration::milliseconds(10),\n \"\\\"10 milliseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microsecond() {\n check_ok(\n parse_duration(\"11 microsecond\"),\n Duration::microseconds(11),\n \"\\\"11 microsecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microseconds() {\n check_ok(\n parse_duration(\"12 microseconds\"),\n Duration::microseconds(12),\n \"\\\"12 microseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_combined() {\n let expected =\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4);\n check_ok(\n parse_duration(\"1 day 2 hours 3 minutes 4 seconds\"),\n expected,\n \"\\\"1 day 2 hours 3 minutes 4 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_out_of_order() {\n check_ok(\n parse_duration(\"1 second 2 hours\"),\n Duration::hours(2) + Duration::seconds(1),\n \"\\\"1 second 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"7 minutes 6 hours 5 days\"),\n Duration::days(5) + Duration::hours(6) + Duration::minutes(7),\n \"\\\"7 minutes 6 hours 5 days\\\"\",\n )\n }\n\n #[test]\n fn test_human_zero_duration_seconds() {\n check_ok(\n parse_duration(\"0 seconds\"),\n Duration::zero(),\n \"\\\"0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_days_hours() {\n check_ok(\n parse_duration(\"0 day 0 hour\"),\n Duration::zero(),\n \"\\\"0 day 0 hour\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_multiple_zeros() {\n check_ok(\n parse_duration(\"0 days 0 hours 0 minutes 0 seconds\"),\n Duration::zero(),\n \"\\\"0 days 0 hours 0 minutes 0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_no_space_between_num_unit() {\n 
check_err_contains(\n parse_duration(\"1day\"),\n \"Invalid human-readable duration format in: 1day\",\n \"\\\"1day\\\"\",\n );\n }\n\n #[test]\n fn test_human_trimmed() {\n check_ok(parse_duration(\" 1 day \"), Duration::days(1), \"\\\" 1 day \\\"\");\n }\n\n #[test]\n fn test_human_extra_whitespace() {\n check_ok(\n parse_duration(\" 1 day 2 hours \"),\n Duration::days(1) + Duration::hours(2),\n \"\\\" 1 day 2 hours \\\"\",\n );\n }\n\n #[test]\n fn test_human_negative_numbers() {\n check_ok(\n parse_duration(\"-1 day 2 hours\"),\n Duration::days(-1) + Duration::hours(2),\n \"\\\"-1 day 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"1 day -2 hours\"),\n Duration::days(1) + Duration::hours(-2),\n \"\\\"1 day -2 hours\\\"\",\n );\n }\n}\n"], ["/cocoindex/src/ops/registration.rs", "use super::{\n factory_bases::*, functions, registry::ExecutorFactoryRegistry, sdk::ExecutorFactory, sources,\n targets,\n};\nuse anyhow::Result;\nuse std::sync::{LazyLock, RwLock};\n\nfn register_executor_factories(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n let reqwest_client = reqwest::Client::new();\n\n sources::local_file::Factory.register(registry)?;\n sources::google_drive::Factory.register(registry)?;\n sources::amazon_s3::Factory.register(registry)?;\n sources::azure_blob::Factory.register(registry)?;\n\n functions::parse_json::Factory.register(registry)?;\n functions::split_recursively::register(registry)?;\n functions::extract_by_llm::Factory.register(registry)?;\n functions::embed_text::register(registry)?;\n\n targets::postgres::Factory::default().register(registry)?;\n targets::qdrant::register(registry)?;\n targets::kuzu::register(registry, reqwest_client)?;\n\n targets::neo4j::Factory::new().register(registry)?;\n\n Ok(())\n}\n\nstatic EXECUTOR_FACTORY_REGISTRY: LazyLock> = LazyLock::new(|| {\n let mut registry = ExecutorFactoryRegistry::new();\n register_executor_factories(&mut registry).expect(\"Failed to register executor factories\");\n 
RwLock::new(registry)\n});\n\npub fn get_optional_executor_factory(kind: &str) -> Option {\n let registry = EXECUTOR_FACTORY_REGISTRY.read().unwrap();\n registry.get(kind).cloned()\n}\n\npub fn get_executor_factory(kind: &str) -> Result {\n get_optional_executor_factory(kind)\n .ok_or_else(|| anyhow::anyhow!(\"Executor factory not found for op kind: {}\", kind))\n}\n\npub fn register_factory(name: String, factory: ExecutorFactory) -> Result<()> {\n let mut registry = EXECUTOR_FACTORY_REGISTRY.write().unwrap();\n registry.register(name, factory)\n}\n"], ["/cocoindex/src/prelude.rs", "#![allow(unused_imports)]\n\npub(crate) use anyhow::{Context, Result};\npub(crate) use async_trait::async_trait;\npub(crate) use chrono::{DateTime, Utc};\npub(crate) use futures::{FutureExt, StreamExt};\npub(crate) use futures::{\n future::{BoxFuture, Shared},\n prelude::*,\n stream::BoxStream,\n};\npub(crate) use indexmap::{IndexMap, IndexSet};\npub(crate) use itertools::Itertools;\npub(crate) use serde::{Deserialize, Serialize, de::DeserializeOwned};\npub(crate) use std::any::Any;\npub(crate) use std::borrow::Cow;\npub(crate) use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};\npub(crate) use std::hash::Hash;\npub(crate) use std::sync::{Arc, LazyLock, Mutex, OnceLock, RwLock, Weak};\n\npub(crate) use crate::base::{self, schema, spec, value};\npub(crate) use crate::builder::{self, exec_ctx, plan};\npub(crate) use crate::execution;\npub(crate) use crate::lib_context::{FlowContext, LibContext, get_lib_context, get_runtime};\npub(crate) use crate::ops::interface;\npub(crate) use crate::service::error::{ApiError, invariance_violation};\npub(crate) use crate::setup;\npub(crate) use crate::setup::AuthRegistry;\npub(crate) use crate::utils::{self, concur_control, retryable};\npub(crate) use crate::{api_bail, api_error};\n\npub(crate) use anyhow::{anyhow, bail};\npub(crate) use async_stream::{stream, try_stream};\npub(crate) use log::{debug, error, info, trace, warn};\n\npub(crate) 
use derivative::Derivative;\n"], ["/cocoindex/src/settings.rs", "use serde::Deserialize;\n\n#[derive(Deserialize, Debug)]\npub struct DatabaseConnectionSpec {\n pub url: String,\n pub user: Option,\n pub password: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct GlobalExecutionOptions {\n pub source_max_inflight_rows: Option,\n pub source_max_inflight_bytes: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct Settings {\n #[serde(default)]\n pub database: Option,\n #[serde(default)]\n #[allow(dead_code)] // Used via serialization/deserialization to Python\n pub app_namespace: String,\n #[serde(default)]\n pub global_execution_options: GlobalExecutionOptions,\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_settings_deserialize_with_database() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n },\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, Some(\"testuser\".to_string()));\n assert_eq!(db.password, Some(\"testpass\".to_string()));\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_without_database() {\n let json = r#\"{\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_empty_object() {\n let json = r#\"{}\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_settings_deserialize_database_without_user_password() {\n let json = r#\"{\n 
\"database\": {\n \"url\": \"postgresql://localhost:5432/test\"\n }\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, None);\n assert_eq!(db.password, None);\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_database_connection_spec_deserialize() {\n let json = r#\"{\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n }\"#;\n\n let db_spec: DatabaseConnectionSpec = serde_json::from_str(json).unwrap();\n\n assert_eq!(db_spec.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db_spec.user, Some(\"testuser\".to_string()));\n assert_eq!(db_spec.password, Some(\"testpass\".to_string()));\n }\n}\n"], ["/cocoindex/src/utils/db.rs", "#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidIdentifier(pub String);\n\nimpl TryFrom for ValidIdentifier {\n type Error = anyhow::Error;\n\n fn try_from(s: String) -> Result {\n if !s.is_empty() && s.chars().all(|c| c.is_alphanumeric() || c == '_') {\n Ok(ValidIdentifier(s))\n } else {\n Err(anyhow::anyhow!(\"Invalid identifier: {s:?}\"))\n }\n }\n}\n\nimpl std::fmt::Display for ValidIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.0, f)\n }\n}\n\nimpl std::ops::Deref for ValidIdentifier {\n type Target = String;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\npub enum WriteAction {\n Insert,\n Update,\n}\n\npub fn sanitize_identifier(s: &str) -> String {\n let mut result = String::new();\n for c in s.chars() {\n if c.is_alphanumeric() || c == '_' {\n result.push(c);\n } else {\n result.push_str(\"__\");\n }\n }\n result\n}\n"], ["/cocoindex/src/utils/immutable.rs", "#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]\npub enum RefList<'a, T> {\n #[default]\n Nil,\n\n Cons(T, &'a RefList<'a, 
T>),\n}\n\nimpl<'a, T> RefList<'a, T> {\n pub fn prepend(&'a self, head: T) -> Self {\n Self::Cons(head, self)\n }\n\n pub fn iter(&'a self) -> impl Iterator {\n self\n }\n\n pub fn head(&'a self) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, _) => Some(head),\n }\n }\n\n pub fn headn(&'a self, n: usize) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n if n == 0 {\n Some(head)\n } else {\n tail.headn(n - 1)\n }\n }\n }\n }\n\n pub fn tail(&'a self) -> Option<&'a RefList<'a, T>> {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => Some(tail),\n }\n }\n\n pub fn tailn(&'a self, n: usize) -> Option<&'a RefList<'a, T>> {\n if n == 0 {\n Some(self)\n } else {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => tail.tailn(n - 1),\n }\n }\n }\n}\n\nimpl<'a, T> Iterator for &'a RefList<'a, T> {\n type Item = &'a T;\n\n fn next(&mut self) -> Option {\n let current = *self;\n match current {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n *self = *tail;\n Some(head)\n }\n }\n }\n}\n"], ["/cocoindex/src/llm/litellm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_litellm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:4000\".to_string());\n let api_key = std::env::var(\"LITELLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/vllm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_vllm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| 
\"http://127.0.0.1:8000/v1\".to_string());\n let api_key = std::env::var(\"VLLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/openrouter.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_openrouter(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"https://openrouter.ai/api/v1\".to_string());\n let api_key = std::env::var(\"OPENROUTER_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/ops/registry.rs", "use super::interface::ExecutorFactory;\nuse anyhow::Result;\nuse std::collections::HashMap;\n\npub struct ExecutorFactoryRegistry {\n factories: HashMap,\n}\n\nimpl Default for ExecutorFactoryRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl ExecutorFactoryRegistry {\n pub fn new() -> Self {\n Self {\n factories: HashMap::new(),\n }\n }\n\n pub fn register(&mut self, name: String, factory: ExecutorFactory) -> Result<()> {\n match self.factories.entry(name) {\n std::collections::hash_map::Entry::Occupied(entry) => Err(anyhow::anyhow!(\n \"Factory with name already exists: {}\",\n entry.key()\n )),\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(factory);\n Ok(())\n }\n }\n }\n\n pub fn get(&self, name: &str) -> Option<&ExecutorFactory> {\n self.factories.get(name)\n }\n}\n"], ["/cocoindex/src/builder/mod.rs", "pub mod analyzer;\npub mod exec_ctx;\npub mod flow_builder;\npub mod plan;\n\nmod analyzed_flow;\n\npub use analyzed_flow::AnalyzedFlow;\npub use analyzed_flow::AnalyzedTransientFlow;\n"], 
["/cocoindex/src/execution/mod.rs", "pub(crate) mod db_tracking_setup;\npub(crate) mod dumper;\npub(crate) mod evaluator;\npub(crate) mod indexing_status;\npub(crate) mod memoization;\npub(crate) mod row_indexer;\npub(crate) mod source_indexer;\npub(crate) mod stats;\n\nmod live_updater;\npub(crate) use live_updater::*;\n\nmod db_tracking;\n"], ["/cocoindex/src/lib.rs", "mod base;\nmod builder;\nmod execution;\nmod lib_context;\nmod llm;\nmod ops;\nmod prelude;\nmod py;\nmod server;\nmod service;\nmod settings;\nmod setup;\nmod utils;\n"], ["/cocoindex/src/base/field_attrs.rs", "use const_format::concatcp;\n\npub static COCOINDEX_PREFIX: &str = \"cocoindex.io/\";\n\n/// Present for bytes and str. It points to fields that represents the original file name for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_FILENAME: &str = concatcp!(COCOINDEX_PREFIX, \"content_filename\");\n\n/// Present for bytes and str. It points to fields that represents mime types for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_MIME_TYPE: &str = concatcp!(COCOINDEX_PREFIX, \"content_mime_type\");\n\n/// Present for chunks. 
It points to fields that the chunks are for.\n/// Type: AnalyzedValueMapping\npub static CHUNK_BASE_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"chunk_base_text\");\n\n/// Base text for an embedding vector.\npub static _EMBEDDING_ORIGIN_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"embedding_origin_text\");\n"], ["/cocoindex/src/base/mod.rs", "pub mod duration;\npub mod field_attrs;\npub mod json_schema;\npub mod schema;\npub mod spec;\npub mod value;\n"], ["/cocoindex/src/utils/mod.rs", "pub mod concur_control;\npub mod db;\npub mod fingerprint;\npub mod immutable;\npub mod retryable;\npub mod yaml_ser;\n"], ["/cocoindex/src/ops/mod.rs", "pub mod interface;\npub mod registry;\n\n// All operations\nmod factory_bases;\nmod functions;\nmod sources;\nmod targets;\n\nmod registration;\npub(crate) use registration::*;\npub(crate) mod py_factory;\n\n// SDK is used for help registration for operations.\nmod sdk;\n"], ["/cocoindex/src/setup/mod.rs", "mod auth_registry;\nmod db_metadata;\nmod driver;\nmod states;\n\npub mod components;\n\npub use auth_registry::AuthRegistry;\npub use driver::*;\npub use states::*;\n"], ["/cocoindex/src/ops/targets/shared/mod.rs", "pub mod property_graph;\npub mod table_columns;\n"], ["/cocoindex/src/ops/functions/mod.rs", "pub mod embed_text;\npub mod extract_by_llm;\npub mod parse_json;\npub mod split_recursively;\n\n#[cfg(test)]\nmod test_utils;\n"], ["/cocoindex/src/service/mod.rs", "pub(crate) mod error;\npub(crate) mod flows;\n"], ["/cocoindex/src/ops/sources/mod.rs", "pub mod amazon_s3;\npub mod azure_blob;\npub mod google_drive;\npub mod local_file;\n"], ["/cocoindex/src/ops/targets/mod.rs", "mod shared;\n\npub mod kuzu;\npub mod neo4j;\npub mod postgres;\npub mod qdrant;\n"]], "task_instance_info": {"created_time": "2025-08-21 01:30:28", "created_task_model": "DeepSeek-R1", "function_skeleton": "pub async fn get_flow (\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {}", "code_description": "This code 
segment functions to: Retrieve configuration details and metadata for a specified data processing pipeline, generate a unique identifier by combining structural elements and schema information, and return a composite response containing both the original specifications and computed verification data."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\npub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(GetFlowResponse {\n flow_spec: flow_ctx.flow.flow_instance.spec.clone(),\n data_schema: flow_ctx.flow.data_schema.clone(),\n fingerprint: flow_ctx.fingerprint.clone(),\n }))\n}\n[TASK_END]\n```", "inference_time": "2025-08-21 01-30-53"}, "editdistance_info": {"edit_distance": 40.3409, "calculate_time": "2025-08-21 01:30:53", "true_code_clean": "pub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}", "predict_code_clean": "pub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(GetFlowResponse {\n flow_spec: flow_ctx.flow.flow_instance.spec.clone(),\n data_schema: flow_ctx.flow.data_schema.clone(),\n fingerprint: flow_ctx.fingerprint.clone(),\n }))\n}"}} {"repo_name": "cocoindex", "file_name": "/cocoindex/src/llm/mod.rs", 
"inference_info": {"prefix_code": "use crate::prelude::*;\n\nuse crate::base::json_schema::ToJsonSchemaOptions;\nuse infer::Infer;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\nstatic INFER: LazyLock = LazyLock::new(Infer::new);\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub enum LlmApiType {\n Ollama,\n OpenAi,\n Gemini,\n Anthropic,\n LiteLlm,\n OpenRouter,\n Voyage,\n Vllm,\n VertexAi,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct VertexAiConfig {\n pub project: String,\n pub region: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum LlmApiConfig {\n VertexAi(VertexAiConfig),\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LlmSpec {\n pub api_type: LlmApiType,\n pub address: Option,\n pub model: String,\n pub api_config: Option,\n}\n\n#[derive(Debug)]\npub enum OutputFormat<'a> {\n JsonSchema {\n name: Cow<'a, str>,\n schema: Cow<'a, SchemaObject>,\n },\n}\n\n#[derive(Debug)]\npub struct LlmGenerateRequest<'a> {\n pub model: &'a str,\n pub system_prompt: Option>,\n pub user_prompt: Cow<'a, str>,\n pub image: Option>,\n pub output_format: Option>,\n}\n\n#[derive(Debug)]\npub struct LlmGenerateResponse {\n pub text: String,\n}\n\n#[async_trait]\npub trait LlmGenerationClient: Send + Sync {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result;\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions;\n}\n\n#[derive(Debug)]\npub struct LlmEmbeddingRequest<'a> {\n pub model: &'a str,\n pub text: Cow<'a, str>,\n pub output_dimension: Option,\n pub task_type: Option>,\n}\n\npub struct LlmEmbeddingResponse {\n pub embedding: Vec,\n}\n\n#[async_trait]\npub trait LlmEmbeddingClient: Send + Sync {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result;\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option;\n}\n\nmod anthropic;\nmod gemini;\nmod litellm;\nmod ollama;\nmod 
openai;\nmod openrouter;\nmod vllm;\nmod voyage;\n\npub async fn new_llm_generation_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::Anthropic => {\n Box::new(anthropic::Client::new(address).await?) as Box\n }\n LlmApiType::LiteLlm => {\n Box::new(litellm::Client::new_litellm(address).await?) as Box\n }\n LlmApiType::OpenRouter => Box::new(openrouter::Client::new_openrouter(address).await?)\n as Box,\n LlmApiType::Voyage => {\n api_bail!(\"Voyage is not supported for generation\")\n }\n LlmApiType::Vllm => {\n Box::new(vllm::Client::new_vllm(address).await?) as Box\n }\n };\n Ok(client)\n}\n\n", "suffix_code": "\n\npub fn detect_image_mime_type(bytes: &[u8]) -> Result<&'static str> {\n let infer = &*INFER;\n match infer.get(bytes) {\n Some(info) if info.mime_type().starts_with(\"image/\") => Ok(info.mime_type()),\n _ => bail!(\"Unknown or unsupported image format\"),\n }\n}\n", "middle_code": "pub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) 
as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/cocoindex/src/llm/gemini.rs", "use crate::prelude::*;\n\nuse crate::llm::{\n LlmEmbeddingClient, LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse base64::prelude::*;\nuse google_cloud_aiplatform_v1 as vertexai;\nuse serde_json::Value;\nuse urlencoding::encode;\n\nfn get_embedding_dimension(model: &str) -> Option {\n let model = model.to_ascii_lowercase();\n if model.starts_with(\"gemini-embedding-\") {\n Some(3072)\n } else if model.starts_with(\"text-embedding-\") {\n Some(768)\n } else if model.starts_with(\"embedding-\") {\n Some(768)\n } else if model.starts_with(\"text-multilingual-embedding-\") {\n Some(768)\n } else {\n None\n }\n}\n\npub struct AiStudioClient {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl AiStudioClient {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Gemini doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"GEMINI_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"GEMINI_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n// Recursively remove all `additionalProperties` fields from a JSON value\nfn remove_additional_properties(value: &mut Value) {\n match value {\n Value::Object(map) => {\n map.remove(\"additionalProperties\");\n for v in map.values_mut() {\n remove_additional_properties(v);\n }\n }\n Value::Array(arr) => {\n for v in arr {\n remove_additional_properties(v);\n }\n }\n _ 
=> {}\n }\n}\n\nimpl AiStudioClient {\n fn get_api_url(&self, model: &str, api_name: &str) -> String {\n format!(\n \"https://generativelanguage.googleapis.com/v1beta/models/{}:{}?key={}\",\n encode(model),\n api_name,\n encode(&self.api_key)\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for AiStudioClient {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_parts: Vec = Vec::new();\n\n // Add text part first\n user_parts.push(serde_json::json!({ \"text\": request.user_prompt }));\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_parts.push(serde_json::json!({\n \"inlineData\": {\n \"mimeType\": mime_type,\n \"data\": base64_image\n }\n }));\n }\n\n // Compose the contents\n let contents = vec![serde_json::json!({\n \"role\": \"user\",\n \"parts\": user_parts\n })];\n\n // Prepare payload\n let mut payload = serde_json::json!({ \"contents\": contents });\n if let Some(system) = request.system_prompt {\n payload[\"systemInstruction\"] = serde_json::json!({\n \"parts\": [ { \"text\": system } ]\n });\n }\n\n // If structured output is requested, add schema and responseMimeType\n if let Some(OutputFormat::JsonSchema { schema, .. 
}) = &request.output_format {\n let mut schema_json = serde_json::to_value(schema)?;\n remove_additional_properties(&mut schema_json);\n payload[\"generationConfig\"] = serde_json::json!({\n \"responseMimeType\": \"application/json\",\n \"responseSchema\": schema_json\n });\n }\n\n let url = self.get_api_url(request.model, \"generateContent\");\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let resp_json: Value = resp.json().await.context(\"Invalid JSON\")?;\n\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Gemini API error: {:?}\", error);\n }\n let mut resp_json = resp_json;\n let text = match &mut resp_json[\"candidates\"][0][\"content\"][\"parts\"][0][\"text\"] {\n Value::String(s) => std::mem::take(s),\n _ => bail!(\"No text in response\"),\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[derive(Deserialize)]\nstruct ContentEmbedding {\n values: Vec,\n}\n#[derive(Deserialize)]\nstruct EmbedContentResponse {\n embedding: ContentEmbedding,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for AiStudioClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = self.get_api_url(request.model, \"embedContent\");\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"content\": { \"parts\": [{ \"text\": request.text }] },\n });\n if let Some(task_type) = request.task_type {\n payload[\"taskType\"] = serde_json::Value::String(task_type.into());\n }\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n 
&retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: EmbedContentResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n\npub struct VertexAiClient {\n client: vertexai::client::PredictionService,\n config: super::VertexAiConfig,\n}\n\nimpl VertexAiClient {\n pub async fn new(\n address: Option,\n api_config: Option,\n ) -> Result {\n if address.is_some() {\n api_bail!(\"VertexAi API address is not supported for VertexAi API type\");\n }\n let Some(super::LlmApiConfig::VertexAi(config)) = api_config else {\n api_bail!(\"VertexAi API config is required for VertexAi API type\");\n };\n let client = vertexai::client::PredictionService::builder()\n .build()\n .await?;\n Ok(Self { client, config })\n }\n\n fn get_model_path(&self, model: &str) -> String {\n format!(\n \"projects/{}/locations/{}/publishers/google/models/{}\",\n self.config.project,\n self.config.region.as_deref().unwrap_or(\"global\"),\n model\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for VertexAiClient {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n use vertexai::model::{Blob, Content, GenerationConfig, Part, Schema, part::Data};\n\n // Compose parts\n let mut parts = Vec::new();\n // Add text part\n parts.push(Part::new().set_text(request.user_prompt.to_string()));\n // Add image part if present\n if let Some(image_bytes) = request.image {\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n parts.push(\n Part::new().set_inline_data(\n Blob::new()\n .set_data(image_bytes.into_owned())\n .set_mime_type(mime_type.to_string()),\n ),\n );\n }\n // Compose content\n let mut contents = 
Vec::new();\n contents.push(Content::new().set_role(\"user\".to_string()).set_parts(parts));\n // Compose system instruction if present\n let system_instruction = request.system_prompt.as_ref().map(|sys| {\n Content::new()\n .set_role(\"system\".to_string())\n .set_parts(vec![Part::new().set_text(sys.to_string())])\n });\n\n // Compose generation config\n let mut generation_config = None;\n if let Some(OutputFormat::JsonSchema { schema, .. }) = &request.output_format {\n let schema_json = serde_json::to_value(schema)?;\n generation_config = Some(\n GenerationConfig::new()\n .set_response_mime_type(\"application/json\".to_string())\n .set_response_schema(serde_json::from_value::(schema_json)?),\n );\n }\n\n let mut req = self\n .client\n .generate_content()\n .set_model(self.get_model_path(request.model))\n .set_contents(contents);\n if let Some(sys) = system_instruction {\n req = req.set_system_instruction(sys);\n }\n if let Some(config) = generation_config {\n req = req.set_generation_config(config);\n }\n\n // Call the API\n let resp = req.send().await?;\n // Extract text from response\n let Some(Data::Text(text)) = resp\n .candidates\n .into_iter()\n .next()\n .and_then(|c| c.content)\n .and_then(|content| content.parts.into_iter().next())\n .and_then(|part| part.data)\n else {\n bail!(\"No text in response\");\n };\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for VertexAiClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n // Create the instances for the request\n let mut instance = serde_json::json!({\n \"content\": request.text\n });\n // Add task type if specified\n if let Some(task_type) = &request.task_type {\n instance[\"task_type\"] = 
serde_json::Value::String(task_type.to_string());\n }\n\n let instances = vec![instance];\n\n // Prepare the request parameters\n let mut parameters = serde_json::json!({});\n if let Some(output_dimension) = request.output_dimension {\n parameters[\"outputDimensionality\"] = serde_json::Value::Number(output_dimension.into());\n }\n\n // Build the prediction request using the raw predict builder\n let response = self\n .client\n .predict()\n .set_endpoint(self.get_model_path(request.model))\n .set_instances(instances)\n .set_parameters(parameters)\n .send()\n .await?;\n\n // Extract the embedding from the response\n let embeddings = response\n .predictions\n .into_iter()\n .next()\n .and_then(|mut e| e.get_mut(\"embeddings\").map(|v| v.take()))\n .ok_or_else(|| anyhow::anyhow!(\"No embeddings in response\"))?;\n let embedding: ContentEmbedding = serde_json::from_value(embeddings)?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/functions/embed_text.rs", "use crate::{\n llm::{\n LlmApiConfig, LlmApiType, LlmEmbeddingClient, LlmEmbeddingRequest, new_llm_embedding_client,\n },\n ops::sdk::*,\n};\n\n#[derive(Deserialize)]\nstruct Spec {\n api_type: LlmApiType,\n model: String,\n address: Option,\n api_config: Option,\n output_dimension: Option,\n task_type: Option,\n}\n\nstruct Args {\n client: Box,\n text: ResolvedOpArg,\n}\n\nstruct Executor {\n spec: Spec,\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let req = LlmEmbeddingRequest {\n model: &self.spec.model,\n text: Cow::Borrowed(text),\n output_dimension: self.spec.output_dimension,\n task_type: self\n .spec\n 
.task_type\n .as_ref()\n .map(|s| Cow::Borrowed(s.as_str())),\n };\n let embedding = self.args.client.embed_text(req).await?;\n Ok(embedding.embedding.into())\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"EmbedText\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)> {\n let text = args_resolver.next_arg(\"text\")?;\n let client =\n new_llm_embedding_client(spec.api_type, spec.address.clone(), spec.api_config.clone())\n .await?;\n let output_dimension = match spec.output_dimension {\n Some(output_dimension) => output_dimension,\n None => {\n client.get_default_embedding_dimension(spec.model.as_str())\n .ok_or_else(|| api_error!(\"model \\\"{}\\\" is unknown for {:?}, needs to specify `output_dimension` explicitly\", spec.model, spec.api_type))?\n }\n };\n let output_schema = make_output_type(BasicValueType::Vector(VectorTypeSchema {\n dimension: Some(output_dimension as usize),\n element_type: Box::new(BasicValueType::Float32),\n }));\n Ok((Args { client, text }, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { spec, args }))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_embed_text() {\n let spec = Spec {\n api_type: LlmApiType::OpenAi,\n model: \"text-embedding-ada-002\".to_string(),\n address: None,\n api_config: None,\n output_dimension: None,\n task_type: None,\n 
};\n\n let factory = Arc::new(Factory);\n let text_content = \"CocoIndex is a performant data transformation framework for AI.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_embed_text: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Vector(arc_vec)) => {\n assert_eq!(arc_vec.len(), 1536, \"Embedding vector dimension mismatch\");\n for item in arc_vec.iter() {\n match item {\n BasicValue::Float32(_) => {}\n _ => panic!(\"Embedding vector element is not Float32: {item:?}\"),\n }\n }\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Vector), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/llm/ollama.rs", "use crate::prelude::*;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient};\nuse schemars::schema::SchemaObject;\nuse serde_with::{base64::Base64, serde_as};\n\nfn get_embedding_dimension(model: &str) -> Option {\n match model.to_ascii_lowercase().as_str() {\n \"mxbai-embed-large\"\n | \"bge-m3\"\n | \"bge-large\"\n | \"snowflake-arctic-embed\"\n | \"snowflake-arctic-embed2\" => Some(1024),\n\n \"nomic-embed-text\"\n | \"paraphrase-multilingual\"\n | \"snowflake-arctic-embed:110m\"\n | \"snowflake-arctic-embed:137m\"\n | \"granite-embedding:278m\" => Some(768),\n\n \"all-minilm\"\n | \"snowflake-arctic-embed:22m\"\n | \"snowflake-arctic-embed:33m\"\n | \"granite-embedding\" => Some(384),\n\n _ => None,\n }\n}\n\npub struct Client {\n generate_url: String,\n embed_url: String,\n reqwest_client: 
reqwest::Client,\n}\n\n#[derive(Debug, Serialize)]\nenum OllamaFormat<'a> {\n #[serde(untagged)]\n JsonSchema(&'a SchemaObject),\n}\n\n#[serde_as]\n#[derive(Debug, Serialize)]\nstruct OllamaRequest<'a> {\n pub model: &'a str,\n pub prompt: &'a str,\n #[serde_as(as = \"Option>\")]\n pub images: Option>,\n pub format: Option>,\n pub system: Option<&'a str>,\n pub stream: Option,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaResponse {\n pub response: String,\n}\n\n#[derive(Debug, Serialize)]\nstruct OllamaEmbeddingRequest<'a> {\n pub model: &'a str,\n pub input: &'a str,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaEmbeddingResponse {\n pub embedding: Vec,\n}\n\nconst OLLAMA_DEFAULT_ADDRESS: &str = \"http://localhost:11434\";\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n let address = match &address {\n Some(addr) => addr.trim_end_matches('/'),\n None => OLLAMA_DEFAULT_ADDRESS,\n };\n Ok(Self {\n generate_url: format!(\"{address}/api/generate\"),\n embed_url: format!(\"{address}/api/embed\"),\n reqwest_client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let req = OllamaRequest {\n model: request.model,\n prompt: request.user_prompt.as_ref(),\n images: request.image.as_deref().map(|img| vec![img]),\n format: request.output_format.as_ref().map(\n |super::OutputFormat::JsonSchema { schema, .. 
}| {\n OllamaFormat::JsonSchema(schema.as_ref())\n },\n ),\n system: request.system_prompt.as_ref().map(|s| s.as_ref()),\n stream: Some(false),\n };\n let res = retryable::run(\n || {\n self.reqwest_client\n .post(self.generate_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !res.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n res.status(),\n res.text().await?\n );\n }\n let json: OllamaResponse = res.json().await?;\n Ok(super::LlmGenerateResponse {\n text: json.response,\n })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: true,\n extract_descriptions: true,\n top_level_must_be_object: false,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let req = OllamaEmbeddingRequest {\n model: request.model,\n input: request.text.as_ref(),\n };\n let resp = retryable::run(\n || {\n self.reqwest_client\n .post(self.embed_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: OllamaEmbeddingResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/functions/extract_by_llm.rs", "use crate::llm::{\n LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat, new_llm_generation_client,\n};\nuse crate::ops::sdk::*;\nuse crate::prelude::*;\nuse base::json_schema::build_json_schema;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\n#[derive(Debug, Clone, Serialize, 
Deserialize)]\npub struct Spec {\n llm_spec: LlmSpec,\n output_type: EnrichedValueType,\n instruction: Option,\n}\n\npub struct Args {\n text: Option,\n image: Option,\n}\n\nstruct Executor {\n args: Args,\n client: Box,\n model: String,\n output_json_schema: SchemaObject,\n system_prompt: String,\n value_extractor: base::json_schema::ValueExtractor,\n}\n\nfn get_system_prompt(instructions: &Option, extra_instructions: Option) -> String {\n let mut message =\n \"You are a helpful assistant that processes user-provided inputs (text, images, or both) to produce structured outputs. \\\nYour task is to follow the provided instructions to generate or extract information and output valid JSON matching the specified schema. \\\nBase your response solely on the content of the input. \\\nFor generative tasks, respond accurately and relevantly based on what is provided. \\\nUnless explicitly instructed otherwise, output only the JSON. DO NOT include explanations, descriptions, or formatting outside the JSON.\"\n .to_string();\n\n if let Some(custom_instructions) = instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(custom_instructions);\n }\n\n if let Some(extra_instructions) = extra_instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(&extra_instructions);\n }\n\n message\n}\n\nimpl Executor {\n async fn new(spec: Spec, args: Args) -> Result {\n let client = new_llm_generation_client(\n spec.llm_spec.api_type,\n spec.llm_spec.address,\n spec.llm_spec.api_config,\n )\n .await?;\n let schema_output = build_json_schema(spec.output_type, client.json_schema_options())?;\n Ok(Self {\n args,\n client,\n model: spec.llm_spec.model,\n output_json_schema: schema_output.schema,\n system_prompt: get_system_prompt(&spec.instruction, schema_output.extra_instructions),\n value_extractor: schema_output.value_extractor,\n })\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn 
enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let image_bytes: Option> = self\n .args\n .image\n .as_ref()\n .map(|arg| arg.value(&input)?.as_bytes())\n .transpose()?\n .map(|bytes| Cow::Borrowed(bytes.as_ref()));\n let text = self\n .args\n .text\n .as_ref()\n .map(|arg| arg.value(&input)?.as_str())\n .transpose()?;\n\n if text.is_none() && image_bytes.is_none() {\n api_bail!(\"At least one of `text` or `image` must be provided\");\n }\n\n let user_prompt = text.map_or(\"\", |v| v);\n let req = LlmGenerateRequest {\n model: &self.model,\n system_prompt: Some(Cow::Borrowed(&self.system_prompt)),\n user_prompt: Cow::Borrowed(user_prompt),\n image: image_bytes,\n output_format: Some(OutputFormat::JsonSchema {\n name: Cow::Borrowed(\"ExtractedData\"),\n schema: Cow::Borrowed(&self.output_json_schema),\n }),\n };\n let res = self.client.generate(req).await?;\n let json_value: serde_json::Value = serde_json::from_str(res.text.as_str())?;\n let value = self.value_extractor.extract_value(json_value)?;\n Ok(value)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ExtractByLlm\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_optional_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n image: args_resolver\n .next_optional_arg(\"image\")?\n .expect_type(&ValueType::Basic(BasicValueType::Bytes))?,\n };\n\n if args.text.is_none() && args.image.is_none() {\n api_bail!(\"At least one of 'text' or 'image' must be provided\");\n }\n\n Ok((args, spec.output_type.clone()))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n resolved_input_schema: Args,\n _context: Arc,\n ) -> Result> {\n 
Ok(Box::new(Executor::new(spec, resolved_input_schema).await?))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires an OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_extract_by_llm() {\n // Define the expected output structure\n let target_output_schema = StructSchema {\n fields: Arc::new(vec![\n FieldSchema::new(\n \"extracted_field_name\",\n make_output_type(BasicValueType::Str),\n ),\n FieldSchema::new(\n \"extracted_field_value\",\n make_output_type(BasicValueType::Int64),\n ),\n ]),\n description: Some(\"A test structure for extraction\".into()),\n };\n\n let output_type_spec = EnrichedValueType {\n typ: ValueType::Struct(target_output_schema.clone()),\n nullable: false,\n attrs: Arc::new(BTreeMap::new()),\n };\n\n let spec = Spec {\n llm_spec: LlmSpec {\n api_type: crate::llm::LlmApiType::OpenAi,\n model: \"gpt-4o\".to_string(),\n address: None,\n api_config: None,\n },\n output_type: output_type_spec,\n instruction: Some(\"Extract the name and value from the text. The name is a string, the value is an integer.\".to_string()),\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"The item is called 'CocoIndex Test' and its value is 42.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_extract_by_llm: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Struct(field_values) => {\n assert_eq!(\n field_values.fields.len(),\n target_output_schema.fields.len(),\n \"Mismatched number of fields in output struct\"\n );\n for (idx, field_schema) in target_output_schema.fields.iter().enumerate() {\n match (&field_values.fields[idx], &field_schema.value_type.typ) {\n (\n Value::Basic(BasicValue::Str(_)),\n ValueType::Basic(BasicValueType::Str),\n ) => {}\n (\n Value::Basic(BasicValue::Int64(_)),\n ValueType::Basic(BasicValueType::Int64),\n ) => {}\n (val, expected_type) => panic!(\n \"Field '{}' type mismatch. Got {:?}, expected type compatible with {:?}\",\n field_schema.name,\n val.kind(),\n expected_type\n ),\n }\n }\n }\n _ => panic!(\"Expected Value::Struct, got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/llm/openai.rs", "use crate::api_bail;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient, detect_image_mime_type};\nuse anyhow::Result;\nuse async_openai::{\n Client as OpenAIClient,\n config::OpenAIConfig,\n types::{\n ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartImage,\n ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage,\n ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,\n ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,\n CreateChatCompletionRequest, CreateEmbeddingRequest, EmbeddingInput, ImageDetail,\n ResponseFormat, ResponseFormatJsonSchema,\n },\n};\nuse async_trait::async_trait;\nuse base64::prelude::*;\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n \"text-embedding-3-small\" => 1536,\n \"text-embedding-3-large\" => 3072,\n \"text-embedding-ada-002\" => 1536,\n};\n\npub struct Client {\n client: async_openai::Client,\n}\n\nimpl Client {\n pub(crate) fn from_parts(client: async_openai::Client) -> Self {\n Self { client }\n }\n\n pub fn new(address: Option) -> Result {\n if let Some(address) = address {\n api_bail!(\"OpenAI doesn't support custom API address: {address}\");\n }\n // Verify API key is set\n if std::env::var(\"OPENAI_API_KEY\").is_err() {\n api_bail!(\"OPENAI_API_KEY environment variable must be set\");\n }\n Ok(Self {\n // OpenAI client will use OPENAI_API_KEY env variable by default\n client: OpenAIClient::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let mut messages = Vec::new();\n\n // Add system prompt if provided\n if let Some(system) = request.system_prompt {\n messages.push(ChatCompletionRequestMessage::System(\n ChatCompletionRequestSystemMessage {\n content: ChatCompletionRequestSystemMessageContent::Text(system.into_owned()),\n ..Default::default()\n },\n ));\n }\n\n // Add user message\n let user_message_content = match request.image {\n Some(img_bytes) => {\n let base64_image = BASE64_STANDARD.encode(img_bytes.as_ref());\n let mime_type = detect_image_mime_type(img_bytes.as_ref())?;\n let image_url = format!(\"data:{mime_type};base64,{base64_image}\");\n ChatCompletionRequestUserMessageContent::Array(vec![\n ChatCompletionRequestUserMessageContentPart::Text(\n ChatCompletionRequestMessageContentPartText {\n text: request.user_prompt.into_owned(),\n },\n ),\n ChatCompletionRequestUserMessageContentPart::ImageUrl(\n ChatCompletionRequestMessageContentPartImage {\n image_url: async_openai::types::ImageUrl {\n url: image_url,\n detail: Some(ImageDetail::Auto),\n },\n },\n ),\n ])\n }\n None => 
ChatCompletionRequestUserMessageContent::Text(request.user_prompt.into_owned()),\n };\n messages.push(ChatCompletionRequestMessage::User(\n ChatCompletionRequestUserMessage {\n content: user_message_content,\n ..Default::default()\n },\n ));\n\n // Create the chat completion request\n let request = CreateChatCompletionRequest {\n model: request.model.to_string(),\n messages,\n response_format: match request.output_format {\n Some(super::OutputFormat::JsonSchema { name, schema }) => {\n Some(ResponseFormat::JsonSchema {\n json_schema: ResponseFormatJsonSchema {\n name: name.into_owned(),\n description: None,\n schema: Some(serde_json::to_value(&schema)?),\n strict: Some(true),\n },\n })\n }\n None => None,\n },\n ..Default::default()\n };\n\n // Send request and get response\n let response = self.client.chat().create(request).await?;\n\n // Extract the response text from the first choice\n let text = response\n .choices\n .into_iter()\n .next()\n .and_then(|choice| choice.message.content)\n .ok_or_else(|| anyhow::anyhow!(\"No response from OpenAI\"))?;\n\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: true,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let response = self\n .client\n .embeddings()\n .create(CreateEmbeddingRequest {\n model: request.model.to_string(),\n input: EmbeddingInput::String(request.text.to_string()),\n dimensions: request.output_dimension,\n ..Default::default()\n })\n .await?;\n Ok(super::LlmEmbeddingResponse {\n embedding: response\n .data\n .into_iter()\n .next()\n .ok_or_else(|| anyhow::anyhow!(\"No embedding returned from OpenAI\"))?\n .embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: 
&str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/llm/voyage.rs", "use crate::prelude::*;\n\nuse crate::llm::{LlmEmbeddingClient, LlmEmbeddingRequest, LlmEmbeddingResponse};\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! {\n // Current models\n \"voyage-3-large\" => 1024,\n \"voyage-3.5\" => 1024,\n \"voyage-3.5-lite\" => 1024,\n \"voyage-code-3\" => 1024,\n \"voyage-finance-2\" => 1024,\n \"voyage-law-2\" => 1024,\n \"voyage-code-2\" => 1536,\n\n // Legacy models\n \"voyage-3\" => 1024,\n \"voyage-3-lite\" => 512,\n \"voyage-multilingual-2\" => 1024,\n \"voyage-large-2-instruct\" => 1024,\n \"voyage-large-2\" => 1536,\n \"voyage-2\" => 1024,\n \"voyage-lite-02-instruct\" => 1024,\n \"voyage-02\" => 1024,\n \"voyage-01\" => 1024,\n \"voyage-lite-01\" => 1024,\n \"voyage-lite-01-instruct\" => 1024,\n};\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Voyage AI doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"VOYAGE_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"VOYAGE_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[derive(Deserialize)]\nstruct EmbeddingData {\n embedding: Vec,\n}\n\n#[derive(Deserialize)]\nstruct EmbedResponse {\n data: Vec,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = \"https://api.voyageai.com/v1/embeddings\";\n\n let mut payload = serde_json::json!({\n \"input\": request.text,\n \"model\": request.model,\n });\n\n if let Some(task_type) = request.task_type {\n payload[\"input_type\"] = serde_json::Value::String(task_type.into());\n }\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n 
.header(\"Authorization\", format!(\"Bearer {}\", self.api_key))\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n\n if !resp.status().is_success() {\n bail!(\n \"Voyage AI API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n\n let embedding_resp: EmbedResponse = resp.json().await.context(\"Invalid JSON\")?;\n\n if embedding_resp.data.is_empty() {\n bail!(\"No embedding data in response\");\n }\n\n Ok(LlmEmbeddingResponse {\n embedding: embedding_resp.data[0].embedding.clone(),\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/llm/anthropic.rs", "use crate::prelude::*;\nuse base64::prelude::*;\n\nuse crate::llm::{\n LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse anyhow::Context;\nuse urlencoding::encode;\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Anthropic doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"ANTHROPIC_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"ANTHROPIC_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_content_parts: Vec = Vec::new();\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_content_parts.push(serde_json::json!({\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": mime_type,\n \"data\": base64_image,\n 
}\n }));\n }\n\n // Add text part\n user_content_parts.push(serde_json::json!({\n \"type\": \"text\",\n \"text\": request.user_prompt\n }));\n\n let messages = vec![serde_json::json!({\n \"role\": \"user\",\n \"content\": user_content_parts\n })];\n\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"messages\": messages,\n \"max_tokens\": 4096\n });\n\n // Add system prompt as top-level field if present (required)\n if let Some(system) = request.system_prompt {\n payload[\"system\"] = serde_json::json!(system);\n }\n\n // Extract schema from output_format, error if not JsonSchema\n let schema = match request.output_format.as_ref() {\n Some(OutputFormat::JsonSchema { schema, .. }) => schema,\n _ => api_bail!(\"Anthropic client expects OutputFormat::JsonSchema for all requests\"),\n };\n\n let schema_json = serde_json::to_value(schema)?;\n payload[\"tools\"] = serde_json::json!([\n { \"type\": \"custom\", \"name\": \"report_result\", \"input_schema\": schema_json }\n ]);\n\n let url = \"https://api.anthropic.com/v1/messages\";\n\n let encoded_api_key = encode(&self.api_key);\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"x-api-key\", encoded_api_key.as_ref())\n .header(\"anthropic-version\", \"2023-06-01\")\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Anthropic API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let mut resp_json: serde_json::Value = resp.json().await.context(\"Invalid JSON\")?;\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Anthropic API error: {:?}\", error);\n }\n\n // Debug print full response\n // println!(\"Anthropic API full response: {resp_json:?}\");\n\n let resp_content = &resp_json[\"content\"];\n let tool_name = \"report_result\";\n let mut extracted_json: Option = None;\n if let Some(array) = resp_content.as_array() {\n for item in array {\n if item.get(\"type\") == 
Some(&serde_json::Value::String(\"tool_use\".to_string()))\n && item.get(\"name\") == Some(&serde_json::Value::String(tool_name.to_string()))\n {\n if let Some(input) = item.get(\"input\") {\n extracted_json = Some(input.clone());\n break;\n }\n }\n }\n }\n let text = if let Some(json) = extracted_json {\n // Try strict JSON serialization first\n serde_json::to_string(&json)?\n } else {\n // Fallback: try text if no tool output found\n match &mut resp_json[\"content\"][0][\"text\"] {\n serde_json::Value::String(s) => {\n // Try strict JSON parsing first\n match serde_json::from_str::(s) {\n Ok(_) => std::mem::take(s),\n Err(e) => {\n // Try permissive json5 parsing as fallback\n match json5::from_str::(s) {\n Ok(value) => {\n println!(\"[Anthropic] Used permissive JSON5 parser for output\");\n serde_json::to_string(&value)?\n }\n Err(e2) => {\n return Err(anyhow::anyhow!(format!(\n \"No structured tool output or text found in response, and permissive JSON5 parsing also failed: {e}; {e2}\"\n )));\n }\n }\n }\n }\n }\n _ => {\n return Err(anyhow::anyhow!(\n \"No structured tool output or text found in response\"\n ));\n }\n }\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n"], ["/cocoindex/src/service/error.rs", "use crate::prelude::*;\n\nuse axum::{\n Json,\n http::StatusCode,\n response::{IntoResponse, Response},\n};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse std::{\n error::Error,\n fmt::{Debug, Display},\n};\n\n#[derive(Debug)]\npub struct ApiError {\n pub err: anyhow::Error,\n pub status_code: StatusCode,\n}\n\nimpl ApiError {\n pub fn new(message: &str, status_code: StatusCode) -> Self {\n Self {\n err: anyhow!(\"{}\", message),\n status_code,\n }\n }\n}\n\nimpl Display for ApiError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> 
std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl Error for ApiError {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.err.source()\n }\n}\n\n#[derive(Serialize)]\nstruct ErrorResponse {\n error: String,\n}\n\nimpl IntoResponse for ApiError {\n fn into_response(self) -> Response {\n debug!(\"Internal server error:\\n{:?}\", self.err);\n let error_response = ErrorResponse {\n error: self.err.to_string(),\n };\n (self.status_code, Json(error_response)).into_response()\n }\n}\n\nimpl From for ApiError {\n fn from(err: anyhow::Error) -> ApiError {\n if err.is::() {\n return err.downcast::().unwrap();\n }\n Self {\n err,\n status_code: StatusCode::INTERNAL_SERVER_ERROR,\n }\n }\n}\n\nimpl From for PyErr {\n fn from(val: ApiError) -> Self {\n PyException::new_err(val.err.to_string())\n }\n}\n\n#[derive(Clone)]\npub struct SharedError {\n pub err: Arc,\n}\n\nimpl SharedError {\n pub fn new(err: anyhow::Error) -> Self {\n Self { err: Arc::new(err) }\n }\n}\nimpl Debug for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.err, f)\n }\n}\n\nimpl Display for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl From for SharedError {\n fn from(err: E) -> Self {\n Self {\n err: Arc::new(anyhow::Error::from(err)),\n }\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + Send + Sync + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\npub fn shared_ok(value: T) -> Result {\n Ok(value)\n}\n\npub type SharedResult = Result;\n\npub struct SharedErrorWrapper(SharedError);\n\nimpl Display for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Debug for SharedErrorWrapper {\n fn fmt(&self, f: &mut 
std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Error for SharedErrorWrapper {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.0.err.as_ref().source()\n }\n}\n\npub trait SharedResultExt {\n fn std_result(self) -> Result;\n}\n\nimpl SharedResultExt for Result {\n fn std_result(self) -> Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err)),\n }\n }\n}\n\npub trait SharedResultExtRef<'a, T> {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper>;\n}\n\nimpl<'a, T> SharedResultExtRef<'a, T> for &'a Result {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper> {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err.clone())),\n }\n }\n}\n\npub fn invariance_violation() -> anyhow::Error {\n anyhow::anyhow!(\"Invariance violation\")\n}\n\n#[macro_export]\nmacro_rules! api_bail {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n return Err($crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST).into())\n };\n}\n\n#[macro_export]\nmacro_rules! api_error {\n ( $fmt:literal $(, $($arg:tt)*)?) 
=> {\n $crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST)\n };\n}\n"], ["/cocoindex/src/base/schema.rs", "use crate::prelude::*;\n\nuse super::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct VectorTypeSchema {\n pub element_type: Box,\n pub dimension: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct UnionTypeSchema {\n pub types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum BasicValueType {\n /// A sequence of bytes in binary.\n Bytes,\n\n /// String encoded in UTF-8.\n Str,\n\n /// A boolean value.\n Bool,\n\n /// 64-bit integer.\n Int64,\n\n /// 32-bit floating point number.\n Float32,\n\n /// 64-bit floating point number.\n Float64,\n\n /// A range, with a start offset and a length.\n Range,\n\n /// A UUID.\n Uuid,\n\n /// Date (without time within the current day).\n Date,\n\n /// Time of the day.\n Time,\n\n /// Local date and time, without timezone.\n LocalDateTime,\n\n /// Date and time with timezone.\n OffsetDateTime,\n\n /// A time duration.\n TimeDelta,\n\n /// A JSON value.\n Json,\n\n /// A vector of values (usually numbers, for embeddings).\n Vector(VectorTypeSchema),\n\n /// A union\n Union(UnionTypeSchema),\n}\n\nimpl std::fmt::Display for BasicValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n BasicValueType::Bytes => write!(f, \"Bytes\"),\n BasicValueType::Str => write!(f, \"Str\"),\n BasicValueType::Bool => write!(f, \"Bool\"),\n BasicValueType::Int64 => write!(f, \"Int64\"),\n BasicValueType::Float32 => write!(f, \"Float32\"),\n BasicValueType::Float64 => write!(f, \"Float64\"),\n BasicValueType::Range => write!(f, \"Range\"),\n BasicValueType::Uuid => write!(f, \"Uuid\"),\n BasicValueType::Date => write!(f, \"Date\"),\n BasicValueType::Time => write!(f, 
\"Time\"),\n BasicValueType::LocalDateTime => write!(f, \"LocalDateTime\"),\n BasicValueType::OffsetDateTime => write!(f, \"OffsetDateTime\"),\n BasicValueType::TimeDelta => write!(f, \"TimeDelta\"),\n BasicValueType::Json => write!(f, \"Json\"),\n BasicValueType::Vector(s) => {\n write!(f, \"Vector[{}\", s.element_type)?;\n if let Some(dimension) = s.dimension {\n write!(f, \", {dimension}\")?;\n }\n write!(f, \"]\")\n }\n BasicValueType::Union(s) => {\n write!(f, \"Union[\")?;\n for (i, typ) in s.types.iter().enumerate() {\n if i > 0 {\n // Add type delimiter\n write!(f, \" | \")?;\n }\n write!(f, \"{typ}\")?;\n }\n write!(f, \"]\")\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct StructSchema {\n pub fields: Arc>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n}\n\nimpl StructSchema {\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: Arc::new(self.fields.iter().map(|f| f.without_attrs()).collect()),\n description: None,\n }\n }\n}\n\nimpl std::fmt::Display for StructSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Struct(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\n#[allow(clippy::enum_variant_names)]\npub enum TableKind {\n /// An table with unordered rows, without key.\n UTable,\n /// A table's first field is the key.\n #[serde(alias = \"Table\")]\n KTable,\n /// A table whose rows orders are preserved.\n #[serde(alias = \"List\")]\n LTable,\n}\n\nimpl std::fmt::Display for TableKind {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n TableKind::UTable => write!(f, \"Table\"),\n TableKind::KTable => write!(f, \"KTable\"),\n TableKind::LTable => write!(f, \"LTable\"),\n }\n 
}\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct TableSchema {\n pub kind: TableKind,\n pub row: StructSchema,\n}\n\nimpl TableSchema {\n pub fn has_key(&self) -> bool {\n match self.kind {\n TableKind::KTable => true,\n TableKind::UTable | TableKind::LTable => false,\n }\n }\n\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self.kind {\n TableKind::KTable => self\n .row\n .fields\n .first()\n .as_ref()\n .map(|field| &field.value_type),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n kind: self.kind,\n row: self.row.without_attrs(),\n }\n }\n}\n\nimpl std::fmt::Display for TableSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}({})\", self.kind, self.row)\n }\n}\n\nimpl TableSchema {\n pub fn new(kind: TableKind, row: StructSchema) -> Self {\n Self { kind, row }\n }\n\n pub fn key_field(&self) -> Option<&FieldSchema> {\n match self.kind {\n TableKind::KTable => Some(self.row.fields.first().unwrap()),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum ValueType {\n Struct(StructSchema),\n\n #[serde(untagged)]\n Basic(BasicValueType),\n\n #[serde(untagged)]\n Table(TableSchema),\n}\n\nimpl ValueType {\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self {\n ValueType::Basic(_) => None,\n ValueType::Struct(_) => None,\n ValueType::Table(c) => c.key_type(),\n }\n }\n\n // Type equality, ignoring attributes.\n pub fn without_attrs(&self) -> Self {\n match self {\n ValueType::Basic(a) => ValueType::Basic(a.clone()),\n ValueType::Struct(a) => ValueType::Struct(a.without_attrs()),\n ValueType::Table(a) => ValueType::Table(a.without_attrs()),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct EnrichedValueType {\n #[serde(rename = \"type\")]\n pub typ: 
DataType,\n\n #[serde(default, skip_serializing_if = \"std::ops::Not::not\")]\n pub nullable: bool,\n\n #[serde(default, skip_serializing_if = \"BTreeMap::is_empty\")]\n pub attrs: Arc>,\n}\n\nimpl EnrichedValueType {\n pub fn without_attrs(&self) -> Self {\n Self {\n typ: self.typ.without_attrs(),\n nullable: self.nullable,\n attrs: Default::default(),\n }\n }\n}\n\nimpl EnrichedValueType {\n pub fn from_alternative(\n value_type: &EnrichedValueType,\n ) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n typ: (&value_type.typ).try_into()?,\n nullable: value_type.nullable,\n attrs: value_type.attrs.clone(),\n })\n }\n\n pub fn with_attr(mut self, key: &str, value: serde_json::Value) -> Self {\n Arc::make_mut(&mut self.attrs).insert(key.to_string(), value);\n self\n }\n}\n\nimpl std::fmt::Display for EnrichedValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.typ)?;\n if self.nullable {\n write!(f, \"?\")?;\n }\n if !self.attrs.is_empty() {\n write!(\n f,\n \" [{}]\",\n self.attrs\n .iter()\n .map(|(k, v)| format!(\"{k}: {v}\"))\n .collect::>()\n .join(\", \")\n )?;\n }\n Ok(())\n }\n}\n\nimpl std::fmt::Display for ValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ValueType::Basic(b) => write!(f, \"{b}\"),\n ValueType::Struct(s) => write!(f, \"{s}\"),\n ValueType::Table(c) => write!(f, \"{c}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct FieldSchema {\n /// ID is used to identify the field in the schema.\n pub name: FieldName,\n\n #[serde(flatten)]\n pub value_type: EnrichedValueType,\n}\n\nimpl FieldSchema {\n pub fn new(name: impl ToString, value_type: EnrichedValueType) -> Self {\n Self {\n name: name.to_string(),\n value_type,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n name: self.name.clone(),\n value_type: self.value_type.without_attrs(),\n }\n }\n}\n\nimpl FieldSchema {\n 
pub fn from_alternative(field: &FieldSchema) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n name: field.name.clone(),\n value_type: EnrichedValueType::from_alternative(&field.value_type)?,\n })\n }\n}\n\nimpl std::fmt::Display for FieldSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.name, self.value_type)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct CollectorSchema {\n pub fields: Vec,\n /// If specified, the collector will have an automatically generated UUID field with the given index.\n pub auto_uuid_field_idx: Option,\n}\n\nimpl std::fmt::Display for CollectorSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Collector(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\nimpl CollectorSchema {\n pub fn from_fields(fields: Vec, auto_uuid_field: Option) -> Self {\n let mut fields = fields;\n let auto_uuid_field_idx = if let Some(auto_uuid_field) = auto_uuid_field {\n fields.insert(\n 0,\n FieldSchema::new(\n auto_uuid_field,\n EnrichedValueType {\n typ: ValueType::Basic(BasicValueType::Uuid),\n nullable: false,\n attrs: Default::default(),\n },\n ),\n );\n Some(0)\n } else {\n None\n };\n Self {\n fields,\n auto_uuid_field_idx,\n }\n }\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: self.fields.iter().map(|f| f.without_attrs()).collect(),\n auto_uuid_field_idx: self.auto_uuid_field_idx,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct OpScopeSchema {\n /// Output schema for ops with output.\n pub op_output_types: HashMap,\n\n /// Child op scope for foreach ops.\n pub op_scopes: HashMap>,\n\n /// Collectors for the current scope.\n pub collectors: Vec>>,\n}\n\n/// Top-level schema for a flow instance.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub 
struct FlowSchema {\n pub schema: StructSchema,\n\n pub root_op_scope: OpScopeSchema,\n}\n\nimpl std::ops::Deref for FlowSchema {\n type Target = StructSchema;\n\n fn deref(&self) -> &Self::Target {\n &self.schema\n }\n}\n\npub struct OpArgSchema {\n pub name: OpArgName,\n pub value_type: EnrichedValueType,\n pub analyzed_value: AnalyzedValueMapping,\n}\n"], ["/cocoindex/src/ops/factory_bases.rs", "use crate::prelude::*;\nuse crate::setup::ResourceSetupStatus;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse super::interface::*;\nuse super::registry::*;\nuse crate::api_bail;\nuse crate::api_error;\nuse crate::base::schema::*;\nuse crate::base::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\nuse crate::setup;\n// SourceFactoryBase\npub struct ResolvedOpArg {\n pub name: String,\n pub typ: EnrichedValueType,\n pub idx: usize,\n}\n\npub trait ResolvedOpArgExt: Sized {\n fn expect_type(self, expected_type: &ValueType) -> Result;\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value>;\n fn take_value(&self, args: &mut [value::Value]) -> Result;\n}\n\nimpl ResolvedOpArgExt for ResolvedOpArg {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n if &self.typ.typ != expected_type {\n api_bail!(\n \"Expected argument `{}` to be of type `{}`, got `{}`\",\n self.name,\n expected_type,\n self.typ.typ\n );\n }\n Ok(self)\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(&args[self.idx])\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(std::mem::take(&mut args[self.idx]))\n }\n}\n\nimpl ResolvedOpArgExt for Option {\n fn expect_type(self, expected_type: 
&ValueType) -> Result {\n self.map(|arg| arg.expect_type(expected_type)).transpose()\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n Ok(self\n .as_ref()\n .map(|arg| arg.value(args))\n .transpose()?\n .unwrap_or(&value::Value::Null))\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n Ok(self\n .as_ref()\n .map(|arg| arg.take_value(args))\n .transpose()?\n .unwrap_or(value::Value::Null))\n }\n}\n\npub struct OpArgsResolver<'a> {\n args: &'a [OpArgSchema],\n num_positional_args: usize,\n next_positional_idx: usize,\n remaining_kwargs: HashMap<&'a str, usize>,\n}\n\nimpl<'a> OpArgsResolver<'a> {\n pub fn new(args: &'a [OpArgSchema]) -> Result {\n let mut num_positional_args = 0;\n let mut kwargs = HashMap::new();\n for (idx, arg) in args.iter().enumerate() {\n if let Some(name) = &arg.name.0 {\n kwargs.insert(name.as_str(), idx);\n } else {\n if !kwargs.is_empty() {\n api_bail!(\"Positional arguments must be provided before keyword arguments\");\n }\n num_positional_args += 1;\n }\n }\n Ok(Self {\n args,\n num_positional_args,\n next_positional_idx: 0,\n remaining_kwargs: kwargs,\n })\n }\n\n pub fn next_optional_arg(&mut self, name: &str) -> Result> {\n let idx = if let Some(idx) = self.remaining_kwargs.remove(name) {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\"`{name}` is provided as both positional and keyword arguments\");\n } else {\n Some(idx)\n }\n } else if self.next_positional_idx < self.num_positional_args {\n let idx = self.next_positional_idx;\n self.next_positional_idx += 1;\n Some(idx)\n } else {\n None\n };\n Ok(idx.map(|idx| ResolvedOpArg {\n name: name.to_string(),\n typ: self.args[idx].value_type.clone(),\n idx,\n }))\n }\n\n pub fn next_arg(&mut self, name: &str) -> Result {\n Ok(self\n .next_optional_arg(name)?\n .ok_or_else(|| api_error!(\"Required argument `{name}` is missing\",))?)\n }\n\n pub fn done(self) -> Result<()> {\n if self.next_positional_idx < 
self.num_positional_args {\n api_bail!(\n \"Expected {} positional arguments, got {}\",\n self.next_positional_idx,\n self.num_positional_args\n );\n }\n if !self.remaining_kwargs.is_empty() {\n api_bail!(\n \"Unexpected keyword arguments: {}\",\n self.remaining_kwargs\n .keys()\n .map(|k| format!(\"`{k}`\"))\n .collect::>()\n .join(\", \")\n )\n }\n Ok(())\n }\n\n pub fn get_analyze_value(&self, resolved_arg: &ResolvedOpArg) -> &AnalyzedValueMapping {\n &self.args[resolved_arg.idx].analyzed_value\n }\n}\n\n#[async_trait]\npub trait SourceFactoryBase: SourceFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn get_output_schema(\n &self,\n spec: &Self::Spec,\n context: &FlowInstanceContext,\n ) -> Result;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::Source(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SourceFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let output_schema = self.get_output_schema(&spec, &context).await?;\n let executor = self.build_executor(spec, context);\n Ok((output_schema, executor))\n }\n}\n\n// SimpleFunctionFactoryBase\n\n#[async_trait]\npub trait SimpleFunctionFactoryBase: SimpleFunctionFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type ResolvedArgs: Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Self::Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)>;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n 
resolved_input_schema: Self::ResolvedArgs,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::SimpleFunction(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let mut args_resolver = OpArgsResolver::new(&input_schema)?;\n let (resolved_input_schema, output_schema) = self\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n args_resolver.done()?;\n let executor = self.build_executor(spec, resolved_input_schema, context);\n Ok((output_schema, executor))\n }\n}\n\npub struct TypedExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: F::Key,\n pub desired_setup_state: F::SetupState,\n}\npub struct TypedExportDataCollectionSpec {\n pub name: String,\n pub spec: F::Spec,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\npub struct TypedResourceSetupChangeItem<'a, F: StorageFactoryBase + ?Sized> {\n pub key: F::Key,\n pub setup_status: &'a F::SetupStatus,\n}\n\n#[async_trait]\npub trait StorageFactoryBase: ExportTargetFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type DeclarationSpec: DeserializeOwned + Send + Sync;\n type Key: Debug + Clone + Serialize + DeserializeOwned + Eq + Hash + Send + Sync;\n type SetupState: Debug + Clone + Serialize + DeserializeOwned + Send + Sync;\n type SetupStatus: ResourceSetupStatus;\n type ExportContext: Send + Sync + 'static;\n\n fn name(&self) -> &str;\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Self::Key, 
Self::SetupState)>,\n )>;\n\n /// Deserialize the setup key from a JSON value.\n /// You can override this method to provide a custom deserialization logic, e.g. to perform backward compatible deserialization.\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(serde_json::from_value(key)?)\n }\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: Self::Key,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &Self::SetupState,\n existing_state: &Self::SetupState,\n ) -> Result;\n\n fn describe_resource(&self, key: &Self::Key) -> Result;\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &Self::ExportContext,\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::ExportTarget(Arc::new(self)),\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[async_trait]\nimpl ExportTargetFactory for T {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n let (data_coll_output, decl_output) = StorageFactoryBase::build(\n self,\n data_collections\n .into_iter()\n .map(|d| {\n anyhow::Ok(TypedExportDataCollectionSpec {\n name: d.name,\n spec: serde_json::from_value(d.spec)?,\n key_fields_schema: d.key_fields_schema,\n value_fields_schema: d.value_fields_schema,\n index_options: d.index_options,\n })\n })\n .collect::>>()?,\n declarations\n .into_iter()\n .map(|d| 
anyhow::Ok(serde_json::from_value(d)?))\n .collect::>>()?,\n context,\n )\n .await?;\n\n let data_coll_output = data_coll_output\n .into_iter()\n .map(|d| {\n Ok(interface::ExportDataCollectionBuildOutput {\n export_context: async move {\n Ok(d.export_context.await? as Arc)\n }\n .boxed(),\n setup_key: serde_json::to_value(d.setup_key)?,\n desired_setup_state: serde_json::to_value(d.desired_setup_state)?,\n })\n })\n .collect::>>()?;\n let decl_output = decl_output\n .into_iter()\n .map(|(key, state)| Ok((serde_json::to_value(key)?, serde_json::to_value(state)?)))\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result> {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n let desired_state: Option = desired_state\n .map(|v| serde_json::from_value(v.clone()))\n .transpose()?;\n let existing_states = from_json_combined_state(existing_states)?;\n let setup_status = StorageFactoryBase::check_setup_status(\n self,\n key,\n desired_state,\n existing_states,\n flow_instance_ctx,\n )\n .await?;\n Ok(Box::new(setup_status))\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n StorageFactoryBase::describe_resource(self, &key)\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n Ok(serde_json::to_value(key)?)\n }\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result {\n let result = StorageFactoryBase::check_state_compatibility(\n self,\n &serde_json::from_value(desired_state.clone())?,\n &serde_json::from_value(existing_state.clone())?,\n )?;\n Ok(result)\n }\n\n fn extract_additional_key(\n &self,\n key: &value::KeyValue,\n value: 
&value::FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n StorageFactoryBase::extract_additional_key(\n self,\n key,\n value,\n export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mutations = mutations\n .into_iter()\n .map(|m| {\n anyhow::Ok(ExportTargetMutationWithContext {\n mutation: m.mutation,\n export_context: m\n .export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>()?;\n StorageFactoryBase::apply_mutation(self, mutations).await\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n StorageFactoryBase::apply_setup_changes(\n self,\n setup_status\n .into_iter()\n .map(|item| -> anyhow::Result<_> {\n Ok(TypedResourceSetupChangeItem {\n key: serde_json::from_value(item.key.clone())?,\n setup_status: (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>>()?,\n context,\n )\n .await\n }\n}\nfn from_json_combined_state(\n existing_states: setup::CombinedState,\n) -> Result> {\n Ok(setup::CombinedState {\n current: existing_states\n .current\n .map(|v| serde_json::from_value(v))\n .transpose()?,\n staging: existing_states\n .staging\n .into_iter()\n .map(|v| {\n anyhow::Ok(match v {\n setup::StateChange::Upsert(v) => {\n setup::StateChange::Upsert(serde_json::from_value(v)?)\n }\n setup::StateChange::Delete => setup::StateChange::Delete,\n })\n })\n .collect::>()?,\n legacy_state_key: existing_states.legacy_state_key,\n })\n}\n"], ["/cocoindex/src/ops/targets/kuzu.rs", "use chrono::TimeDelta;\nuse serde_json::json;\n\nuse std::fmt::Write;\n\nuse super::shared::property_graph::GraphElementMapping;\nuse super::shared::property_graph::*;\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse 
crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::prelude::*;\n\nuse crate::setup::SetupChangeType;\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n /// The URL of the [Kuzu API server](https://kuzu.com/docs/api/server/overview),\n /// e.g. `http://localhost:8000`.\n api_server_url: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\n////////////////////////////////////////////////////////////\n// Utils to deal with Kuzu\n////////////////////////////////////////////////////////////\n\nstruct CypherBuilder {\n query: String,\n}\n\nimpl CypherBuilder {\n fn new() -> Self {\n Self {\n query: String::new(),\n }\n }\n\n fn query_mut(&mut self) -> &mut String {\n &mut self.query\n }\n}\n\nstruct KuzuThinClient {\n reqwest_client: reqwest::Client,\n query_url: String,\n}\n\nimpl KuzuThinClient {\n fn new(conn_spec: &ConnectionSpec, reqwest_client: reqwest::Client) -> Self {\n Self {\n reqwest_client,\n query_url: format!(\"{}/cypher\", conn_spec.api_server_url.trim_end_matches('/')),\n }\n }\n\n async fn run_cypher(&self, cyper_builder: CypherBuilder) -> Result<()> {\n if cyper_builder.query.is_empty() {\n return Ok(());\n }\n let query = json!({\n \"query\": cyper_builder.query\n });\n let response = self\n .reqwest_client\n .post(&self.query_url)\n .json(&query)\n .send()\n .await?;\n if !response.status().is_success() {\n return Err(anyhow::anyhow!(\n \"Failed to run cypher: {}\",\n response.text().await?\n ));\n }\n Ok(())\n }\n}\n\nfn 
kuzu_table_type(elem_type: &ElementType) -> &'static str {\n match elem_type {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"REL\",\n }\n}\n\nfn basic_type_to_kuzu(basic_type: &BasicValueType) -> Result {\n Ok(match basic_type {\n BasicValueType::Bytes => \"BLOB\".to_string(),\n BasicValueType::Str => \"STRING\".to_string(),\n BasicValueType::Bool => \"BOOL\".to_string(),\n BasicValueType::Int64 => \"INT64\".to_string(),\n BasicValueType::Float32 => \"FLOAT\".to_string(),\n BasicValueType::Float64 => \"DOUBLE\".to_string(),\n BasicValueType::Range => \"UINT64[2]\".to_string(),\n BasicValueType::Uuid => \"UUID\".to_string(),\n BasicValueType::Date => \"DATE\".to_string(),\n BasicValueType::LocalDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::OffsetDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::TimeDelta => \"INTERVAL\".to_string(),\n BasicValueType::Vector(t) => format!(\n \"{}[{}]\",\n basic_type_to_kuzu(&t.element_type)?,\n t.dimension\n .map_or_else(|| \"\".to_string(), |d| d.to_string())\n ),\n t @ (BasicValueType::Union(_) | BasicValueType::Time | BasicValueType::Json) => {\n api_bail!(\"{t} is not supported in Kuzu\")\n }\n })\n}\n\nfn struct_schema_to_kuzu(struct_schema: &StructSchema) -> Result {\n Ok(format!(\n \"STRUCT({})\",\n struct_schema\n .fields\n .iter()\n .map(|f| Ok(format!(\n \"{} {}\",\n f.name,\n value_type_to_kuzu(&f.value_type.typ)?\n )))\n .collect::>>()?\n .join(\", \")\n ))\n}\n\nfn value_type_to_kuzu(value_type: &ValueType) -> Result {\n Ok(match value_type {\n ValueType::Basic(basic_type) => basic_type_to_kuzu(basic_type)?,\n ValueType::Struct(struct_type) => struct_schema_to_kuzu(struct_type)?,\n ValueType::Table(table_type) => format!(\"{}[]\", struct_schema_to_kuzu(&table_type.row)?),\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, 
Eq)]\nstruct ReferencedNodeTable {\n table_name: String,\n\n #[serde(with = \"indexmap::map::serde_seq\")]\n key_columns: IndexMap,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct SetupState {\n schema: TableColumnsSchema,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n referenced_node_tables: Option<(ReferencedNodeTable, ReferencedNodeTable)>,\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Borrowed(&val.schema)\n }\n}\n\n#[derive(Debug)]\nstruct GraphElementDataSetupStatus {\n actions: TableMainSetupAction,\n referenced_node_tables: Option<(String, String)>,\n drop_affected_referenced_node_tables: IndexSet,\n}\n\nimpl setup::ResourceSetupStatus for GraphElementDataSetupStatus {\n fn describe_changes(&self) -> Vec {\n self.actions.describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.actions.change_type(false)\n }\n}\n\nfn append_drop_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n if !setup_status.actions.drop_existing {\n return Ok(());\n }\n writeln!(\n cypher.query_mut(),\n \"DROP TABLE IF EXISTS {};\",\n elem_type.label()\n )?;\n Ok(())\n}\n\nfn append_delete_orphaned_nodes(cypher: &mut CypherBuilder, node_table: &str) -> Result<()> {\n writeln!(\n cypher.query_mut(),\n \"MATCH (n:{node_table}) WITH n WHERE NOT (n)--() DELETE n;\"\n )?;\n Ok(())\n}\n\nfn append_upsert_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n let table_upsertion = if let Some(table_upsertion) = &setup_status.actions.table_upsertion {\n table_upsertion\n } else {\n return Ok(());\n };\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n write!(\n cypher.query_mut(),\n \"CREATE {kuzu_table_type} TABLE IF NOT EXISTS {table_name} (\",\n kuzu_table_type = 
kuzu_table_type(elem_type),\n table_name = elem_type.label(),\n )?;\n if let Some((src, tgt)) = &setup_status.referenced_node_tables {\n write!(cypher.query_mut(), \"FROM {src} TO {tgt}, \")?;\n }\n cypher.query_mut().push_str(\n keys.iter()\n .chain(values.iter())\n .map(|(name, kuzu_type)| format!(\"{name} {kuzu_type}\"))\n .join(\", \")\n .as_str(),\n );\n match elem_type {\n ElementType::Node(_) => {\n write!(\n cypher.query_mut(),\n \", {SELF_CONTAINED_TAG_FIELD_NAME} BOOL, PRIMARY KEY ({})\",\n keys.iter().map(|(name, _)| name).join(\", \")\n )?;\n }\n ElementType::Relationship(_) => {}\n }\n write!(cypher.query_mut(), \");\\n\\n\")?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n let table_name = elem_type.label();\n for name in columns_to_delete\n .iter()\n .chain(columns_to_upsert.iter().map(|(name, _)| name))\n {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} DROP IF EXISTS {name};\"\n )?;\n }\n for (name, kuzu_type) in columns_to_upsert.iter() {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} ADD {name} {kuzu_type};\",\n )?;\n }\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Utils to convert value to Kuzu literals\n////////////////////////////////////////////////////////////\n\nfn append_string_literal(cypher: &mut CypherBuilder, s: &str) -> Result<()> {\n let out = cypher.query_mut();\n out.push('\"');\n for c in s.chars() {\n match c {\n '\\\\' => out.push_str(\"\\\\\\\\\"),\n '\"' => out.push_str(\"\\\\\\\"\"),\n // Control characters (0x00..=0x1F)\n c if (c as u32) < 0x20 => write!(out, \"\\\\u{:04X}\", c as u32)?,\n // BMP Unicode\n c if (c as u32) <= 0xFFFF => out.push(c),\n // Non-BMP Unicode: Encode as surrogate pairs for Cypher \\uXXXX\\uXXXX\n c => {\n let code = c as u32;\n let high = 0xD800 + ((code - 0x10000) >> 10);\n let low = 0xDC00 + ((code - 0x10000) & 0x3FF);\n write!(out, \"\\\\u{high:04X}\\\\u{low:04X}\")?;\n }\n }\n }\n 
out.push('\"');\n Ok(())\n}\n\nfn append_basic_value(cypher: &mut CypherBuilder, basic_value: &BasicValue) -> Result<()> {\n match basic_value {\n BasicValue::Bytes(bytes) => {\n write!(cypher.query_mut(), \"BLOB(\")?;\n for byte in bytes {\n write!(cypher.query_mut(), \"\\\\\\\\x{byte:02X}\")?;\n }\n write!(cypher.query_mut(), \")\")?;\n }\n BasicValue::Str(s) => {\n append_string_literal(cypher, s)?;\n }\n BasicValue::Bool(b) => {\n write!(cypher.query_mut(), \"{b}\")?;\n }\n BasicValue::Int64(i) => {\n write!(cypher.query_mut(), \"{i}\")?;\n }\n BasicValue::Float32(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Float64(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Range(r) => {\n write!(cypher.query_mut(), \"[{}, {}]\", r.start, r.end)?;\n }\n BasicValue::Uuid(u) => {\n write!(cypher.query_mut(), \"UUID(\\\"{u}\\\")\")?;\n }\n BasicValue::Date(d) => {\n write!(cypher.query_mut(), \"DATE(\\\"{d}\\\")\")?;\n }\n BasicValue::LocalDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::OffsetDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::TimeDelta(td) => {\n let num_days = td.num_days();\n let sub_day_duration = *td - TimeDelta::days(num_days);\n write!(cypher.query_mut(), \"INTERVAL(\\\"\")?;\n if num_days != 0 {\n write!(cypher.query_mut(), \"{num_days} days \")?;\n }\n let microseconds = sub_day_duration\n .num_microseconds()\n .ok_or_else(invariance_violation)?;\n write!(cypher.query_mut(), \"{microseconds} microseconds\\\")\")?;\n }\n BasicValue::Vector(v) => {\n write!(cypher.query_mut(), \"[\")?;\n let mut prefix = \"\";\n for elem in v.iter() {\n cypher.query_mut().push_str(prefix);\n append_basic_value(cypher, elem)?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"]\")?;\n }\n v @ (BasicValue::UnionVariant { .. 
} | BasicValue::Time(_) | BasicValue::Json(_)) => {\n bail!(\"value types are not supported in Kuzu: {}\", v.kind());\n }\n }\n Ok(())\n}\n\nfn append_struct_fields<'a>(\n cypher: &'a mut CypherBuilder,\n field_schema: &[schema::FieldSchema],\n field_values: impl Iterator,\n) -> Result<()> {\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(field_schema.iter(), field_values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v)?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_value(\n cypher: &mut CypherBuilder,\n typ: &schema::ValueType,\n value: &value::Value,\n) -> Result<()> {\n match value {\n value::Value::Null => {\n write!(cypher.query_mut(), \"NULL\")?;\n }\n value::Value::Basic(basic_value) => append_basic_value(cypher, basic_value)?,\n value::Value::Struct(struct_value) => {\n let struct_schema = match typ {\n schema::ValueType::Struct(struct_schema) => struct_schema,\n _ => {\n api_bail!(\"Expected struct type, got {}\", typ);\n }\n };\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &struct_schema.fields, struct_value.fields.iter())?;\n cypher.query_mut().push('}');\n }\n value::Value::KTable(map) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n cypher.query_mut().push('[');\n let mut prefix = \"\";\n for (k, v) in map.iter() {\n let key_value = value::Value::from(k);\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(\n cypher,\n &row_schema.fields,\n std::iter::once(&key_value).chain(v.fields.iter()),\n )?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n value::Value::LTable(rows) | value::Value::UTable(rows) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n 
cypher.query_mut().push('[');\n let mut prefix = \"\";\n for v in rows.iter() {\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &row_schema.fields, v.fields.iter())?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n conn_ref: AuthEntryReference,\n kuzu_client: KuzuThinClient,\n analyzed_data_coll: AnalyzedDataCollection,\n}\n\nfn append_key_pattern<'a>(\n cypher: &'a mut CypherBuilder,\n key_fields: &'a [FieldSchema],\n values: impl Iterator>,\n) -> Result<()> {\n write!(cypher.query_mut(), \"{{\")?;\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(key_fields.iter(), values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v.as_ref())?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"}}\")?;\n Ok(())\n}\n\nfn append_set_value_fields(\n cypher: &mut CypherBuilder,\n var_name: &str,\n value_fields: &[FieldSchema],\n value_fields_idx: &[usize],\n upsert_entry: &ExportTargetUpsertEntry,\n set_self_contained_tag: bool,\n) -> Result<()> {\n let mut prefix = \" SET \";\n if set_self_contained_tag {\n write!(\n cypher.query_mut(),\n \"{prefix}{var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE\"\n )?;\n prefix = \", \";\n }\n for (value_field, value_idx) in std::iter::zip(value_fields.iter(), value_fields_idx.iter()) {\n let field_name = &value_field.name;\n write!(cypher.query_mut(), \"{prefix}{var_name}.{field_name}=\")?;\n append_value(\n cypher,\n &value_field.value_type.typ,\n &upsert_entry.value.fields[*value_idx],\n )?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_upsert_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const 
NODE_VAR_NAME: &str = \"n\";\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({NODE_VAR_NAME}:{label} \",\n label = data_coll.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n NODE_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n true,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_merge_node_for_rel(\n cypher: &mut CypherBuilder,\n var_name: &str,\n field_mapping: &AnalyzedGraphElementFieldMapping,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({var_name}:{label} \",\n label = field_mapping.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &field_mapping.schema.key_fields,\n field_mapping\n .fields_input_idx\n .key\n .iter()\n .map(|idx| Cow::Borrowed(&upsert_entry.value.fields[*idx])),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n var_name,\n &field_mapping.schema.value_fields,\n &field_mapping.fields_input_idx.value,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut())?;\n Ok(())\n}\n\nfn append_upsert_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n const SRC_NODE_VAR_NAME: &str = \"s\";\n const TGT_NODE_VAR_NAME: &str = \"t\";\n\n let rel_info = if let Some(rel_info) = &data_coll.rel {\n rel_info\n } else {\n return Ok(());\n };\n append_merge_node_for_rel(cypher, SRC_NODE_VAR_NAME, &rel_info.source, upsert_entry)?;\n append_merge_node_for_rel(cypher, TGT_NODE_VAR_NAME, &rel_info.target, upsert_entry)?;\n {\n let rel_type = data_coll.schema.elem_type.label();\n write!(\n cypher.query_mut(),\n 
\"MERGE ({SRC_NODE_VAR_NAME})-[{REL_VAR_NAME}:{rel_type} \"\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \"]->({TGT_NODE_VAR_NAME})\")?;\n }\n append_set_value_fields(\n cypher,\n REL_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = data_coll.schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n key.fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} SET {NODE_VAR_NAME}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\"\n )?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n src_node_key: &KeyValue,\n tgt_node_key: &KeyValue,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n let rel_type = data_coll.schema.elem_type.label();\n\n write!(\n cypher.query_mut(),\n \"MATCH (:{label} \",\n label = rel.source.schema.elem_type.label()\n )?;\n let src_key_schema = &rel.source.schema.key_fields;\n append_key_pattern(\n cypher,\n src_key_schema,\n src_node_key\n .fields_iter(src_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n 
)?;\n\n write!(cypher.query_mut(), \")-[{REL_VAR_NAME}:{rel_type} \")?;\n let key_schema = &data_coll.schema.key_fields;\n append_key_pattern(\n cypher,\n key_schema,\n key.fields_iter(key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n\n write!(\n cypher.query_mut(),\n \"]->(:{label} \",\n label = rel.target.schema.elem_type.label()\n )?;\n let tgt_key_schema = &rel.target.schema.key_fields;\n append_key_pattern(\n cypher,\n tgt_key_schema,\n tgt_node_key\n .fields_iter(tgt_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n write!(cypher.query_mut(), \") DELETE {REL_VAR_NAME}\")?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_maybe_gc_node(\n cypher: &mut CypherBuilder,\n schema: &GraphElementSchema,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &schema.key_fields,\n key.fields_iter(schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n write!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\ntype KuzuGraphElement = GraphElementType;\n\nstruct Factory {\n reqwest_client: reqwest::Client,\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = GraphElementDataSetupStatus;\n\n type Key = KuzuGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Kuzu\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) 
-> Result<(\n Vec>,\n Vec<(KuzuGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n fn to_kuzu_cols(fields: &[FieldSchema]) -> Result> {\n fields\n .iter()\n .map(|f| Ok((f.name.clone(), value_type_to_kuzu(&f.value_type.typ)?)))\n .collect::>>()\n }\n let data_coll_outputs: Vec> =\n std::iter::zip(data_collections, analyzed_data_colls.into_iter())\n .map(|(data_coll, analyzed)| {\n fn to_dep_table(\n field_mapping: &AnalyzedGraphElementFieldMapping,\n ) -> Result {\n Ok(ReferencedNodeTable {\n table_name: field_mapping.schema.elem_type.label().to_string(),\n key_columns: to_kuzu_cols(&field_mapping.schema.key_fields)?,\n })\n }\n let setup_key = KuzuGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&analyzed.schema.key_fields)?,\n value_columns: to_kuzu_cols(&analyzed.schema.value_fields)?,\n },\n referenced_node_tables: (analyzed.rel.as_ref())\n .map(|rel| {\n anyhow::Ok((to_dep_table(&rel.source)?, to_dep_table(&rel.target)?))\n })\n .transpose()?,\n };\n\n let export_context = ExportContext {\n conn_ref: data_coll.spec.connection.clone(),\n kuzu_client: KuzuThinClient::new(\n &context\n .auth_registry\n .get::(&data_coll.spec.connection)?,\n self.reqwest_client.clone(),\n ),\n analyzed_data_coll: analyzed,\n };\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: async move { Ok(Arc::new(export_context)) }.boxed(),\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>()?;\n let decl_output = 
std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&graph_elem_schema.key_fields)?,\n value_columns: to_kuzu_cols(&graph_elem_schema.value_fields)?,\n },\n referenced_node_tables: None,\n };\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>()?;\n Ok((data_coll_outputs, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n _key: KuzuGraphElement,\n desired: Option,\n existing: CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let existing_invalidated = desired.as_ref().is_some_and(|desired| {\n existing\n .possible_versions()\n .any(|v| v.referenced_node_tables != desired.referenced_node_tables)\n });\n let actions =\n TableMainSetupAction::from_states(desired.as_ref(), &existing, existing_invalidated);\n let drop_affected_referenced_node_tables = if actions.drop_existing {\n existing\n .possible_versions()\n .flat_map(|v| &v.referenced_node_tables)\n .flat_map(|(src, tgt)| [src.table_name.clone(), tgt.table_name.clone()].into_iter())\n .collect()\n } else {\n IndexSet::new()\n };\n Ok(GraphElementDataSetupStatus {\n actions,\n referenced_node_tables: desired\n .and_then(|desired| desired.referenced_node_tables)\n .map(|(src, tgt)| (src.table_name, tgt.table_name)),\n drop_affected_referenced_node_tables,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(\n if desired.referenced_node_tables != existing.referenced_node_tables {\n SetupStateCompatibility::NotCompatible\n } else {\n check_table_compatibility(&desired.schema, &existing.schema)\n },\n )\n }\n\n fn describe_resource(&self, key: &KuzuGraphElement) -> Result {\n Ok(format!(\n \"Kuzu {} TABLE {}\",\n kuzu_table_type(&key.typ),\n key.typ.label()\n ))\n }\n\n fn 
extract_additional_key(\n &self,\n _key: &KeyValue,\n value: &FieldValues,\n export_context: &ExportContext,\n ) -> Result {\n let additional_key = if let Some(rel_info) = &export_context.analyzed_data_coll.rel {\n serde_json::to_value((\n (rel_info.source.fields_input_idx).extract_key(&value.fields)?,\n (rel_info.target.fields_input_idx).extract_key(&value.fields)?,\n ))?\n } else {\n serde_json::Value::Null\n };\n Ok(additional_key)\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mutations_by_conn = IndexMap::new();\n for mutation in mutations.into_iter() {\n mutations_by_conn\n .entry(mutation.export_context.conn_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mutations in mutations_by_conn.into_values() {\n let kuzu_client = &mutations[0].export_context.kuzu_client;\n let mut cypher = CypherBuilder::new();\n writeln!(cypher.query_mut(), \"BEGIN TRANSACTION;\")?;\n\n let (mut rel_mutations, nodes_mutations): (Vec<_>, Vec<_>) = mutations\n .into_iter()\n .partition(|m| m.export_context.analyzed_data_coll.rel.is_some());\n\n struct NodeTableGcInfo {\n schema: Arc,\n keys: IndexSet,\n }\n fn register_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: KeyValue,\n ) {\n map.entry(schema.elem_type.clone())\n .or_insert_with(|| NodeTableGcInfo {\n schema: schema.clone(),\n keys: IndexSet::new(),\n })\n .keys\n .insert(key);\n }\n fn resolve_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: &KeyValue,\n ) {\n map.get_mut(&schema.elem_type)\n .map(|info| info.keys.shift_remove(key));\n }\n let mut gc_info = IndexMap::::new();\n\n // Deletes for relationships\n for rel_mutation in rel_mutations.iter_mut() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n for delete in rel_mutation.mutation.deletes.iter_mut() {\n let mut additional_keys = match delete.additional_key.take() {\n serde_json::Value::Array(keys) => 
keys,\n _ => return Err(invariance_violation()),\n };\n if additional_keys.len() != 2 {\n api_bail!(\n \"Expected additional key with 2 fields, got {}\",\n delete.additional_key\n );\n }\n let src_key = KeyValue::from_json(\n additional_keys[0].take(),\n &rel.source.schema.key_fields,\n )?;\n let tgt_key = KeyValue::from_json(\n additional_keys[1].take(),\n &rel.target.schema.key_fields,\n )?;\n append_delete_rel(&mut cypher, data_coll, &delete.key, &src_key, &tgt_key)?;\n register_gc_node(&mut gc_info, &rel.source.schema, src_key);\n register_gc_node(&mut gc_info, &rel.target.schema, tgt_key);\n }\n }\n\n for node_mutation in nodes_mutations.iter() {\n let data_coll = &node_mutation.export_context.analyzed_data_coll;\n // Deletes for nodes\n for delete in node_mutation.mutation.deletes.iter() {\n append_delete_node(&mut cypher, data_coll, &delete.key)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &delete.key);\n }\n\n // Upserts for nodes\n for upsert in node_mutation.mutation.upserts.iter() {\n append_upsert_node(&mut cypher, data_coll, upsert)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &upsert.key);\n }\n }\n // Upserts for relationships\n for rel_mutation in rel_mutations.iter() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n for upsert in rel_mutation.mutation.upserts.iter() {\n append_upsert_rel(&mut cypher, data_coll, upsert)?;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n resolve_gc_node(\n &mut gc_info,\n &rel.source.schema,\n &(rel.source.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n resolve_gc_node(\n &mut gc_info,\n &rel.target.schema,\n &(rel.target.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n }\n }\n\n // GC orphaned nodes\n for info in gc_info.into_values() {\n for key in info.keys {\n append_maybe_gc_node(&mut cypher, &info.schema, &key)?;\n }\n }\n\n writeln!(cypher.query_mut(), \"COMMIT;\")?;\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n 
}\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n let mut changes_by_conn = IndexMap::new();\n for change in changes.into_iter() {\n changes_by_conn\n .entry(change.key.connection.clone())\n .or_insert_with(Vec::new)\n .push(change);\n }\n for (conn, changes) in changes_by_conn.into_iter() {\n let conn_spec = context.auth_registry.get::(&conn)?;\n let kuzu_client = KuzuThinClient::new(&conn_spec, self.reqwest_client.clone());\n\n let (node_changes, rel_changes): (Vec<_>, Vec<_>) =\n changes.into_iter().partition(|c| match &c.key.typ {\n ElementType::Node(_) => true,\n ElementType::Relationship(_) => false,\n });\n\n let mut partial_affected_node_tables = IndexSet::new();\n let mut cypher = CypherBuilder::new();\n // Relationships first when dropping.\n for change in rel_changes.iter().chain(node_changes.iter()) {\n if !change.setup_status.actions.drop_existing {\n continue;\n }\n append_drop_table(&mut cypher, change.setup_status, &change.key.typ)?;\n\n partial_affected_node_tables.extend(\n change\n .setup_status\n .drop_affected_referenced_node_tables\n .iter(),\n );\n if let ElementType::Node(label) = &change.key.typ {\n partial_affected_node_tables.swap_remove(label);\n }\n }\n // Nodes first when creating.\n for change in node_changes.iter().chain(rel_changes.iter()) {\n append_upsert_table(&mut cypher, change.setup_status, &change.key.typ)?;\n }\n\n for table in partial_affected_node_tables {\n append_delete_orphaned_nodes(&mut cypher, table)?;\n }\n\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n }\n}\n\npub fn register(\n registry: &mut ExecutorFactoryRegistry,\n reqwest_client: reqwest::Client,\n) -> Result<()> {\n Factory { reqwest_client }.register(registry)\n}\n"], ["/cocoindex/src/ops/targets/qdrant.rs", "use crate::ops::sdk::*;\nuse crate::prelude::*;\n\nuse std::fmt::Display;\n\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::setup;\nuse qdrant_client::Qdrant;\nuse 
qdrant_client::qdrant::{\n CreateCollectionBuilder, DeletePointsBuilder, DenseVector, Distance, MultiDenseVector,\n MultiVectorComparator, MultiVectorConfigBuilder, NamedVectors, PointId, PointStruct,\n PointsIdsList, UpsertPointsBuilder, Value as QdrantValue, Vector as QdrantVector,\n VectorParamsBuilder, VectorsConfigBuilder,\n};\n\nconst DEFAULT_VECTOR_SIMILARITY_METRIC: spec::VectorSimilarityMetric =\n spec::VectorSimilarityMetric::CosineSimilarity;\nconst DEFAULT_URL: &str = \"http://localhost:6334/\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n grpc_url: String,\n api_key: Option,\n}\n\n#[derive(Debug, Deserialize, Clone)]\nstruct Spec {\n connection: Option>,\n collection_name: String,\n}\n\n////////////////////////////////////////////////////////////\n// Common\n////////////////////////////////////////////////////////////\n\nstruct FieldInfo {\n field_schema: schema::FieldSchema,\n vector_shape: Option,\n}\n\nenum VectorShape {\n Vector(usize),\n MultiVector(usize),\n}\n\nimpl VectorShape {\n fn vector_size(&self) -> usize {\n match self {\n VectorShape::Vector(size) => *size,\n VectorShape::MultiVector(size) => *size,\n }\n }\n\n fn multi_vector_comparator(&self) -> Option {\n match self {\n VectorShape::MultiVector(_) => Some(MultiVectorComparator::MaxSim),\n _ => None,\n }\n }\n}\n\nfn parse_vector_schema_shape(vector_schema: &schema::VectorTypeSchema) -> Option {\n match &*vector_schema.element_type {\n schema::BasicValueType::Float32\n | schema::BasicValueType::Float64\n | schema::BasicValueType::Int64 => vector_schema.dimension.map(VectorShape::Vector),\n\n schema::BasicValueType::Vector(nested_vector_schema) => {\n match parse_vector_schema_shape(nested_vector_schema) {\n Some(VectorShape::Vector(dim)) => Some(VectorShape::MultiVector(dim)),\n _ => None,\n }\n }\n _ => None,\n 
}\n}\n\nfn parse_vector_shape(typ: &schema::ValueType) -> Option {\n match typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(vector_schema)) => {\n parse_vector_schema_shape(vector_schema)\n }\n _ => None,\n }\n}\n\nfn encode_dense_vector(v: &BasicValue) -> Result {\n let vec = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(|elem| {\n Ok(match elem {\n BasicValue::Float32(f) => *f,\n BasicValue::Float64(f) => *f as f32,\n BasicValue::Int64(i) => *i as f32,\n _ => bail!(\"Unsupported vector type: {:?}\", elem.kind()),\n })\n })\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vec.into())\n}\n\nfn encode_multi_dense_vector(v: &BasicValue) -> Result {\n let vecs = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(encode_dense_vector)\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vecs.into())\n}\n\nfn embedding_metric_to_qdrant(metric: spec::VectorSimilarityMetric) -> Result {\n Ok(match metric {\n spec::VectorSimilarityMetric::CosineSimilarity => Distance::Cosine,\n spec::VectorSimilarityMetric::L2Distance => Distance::Euclid,\n spec::VectorSimilarityMetric::InnerProduct => Distance::Dot,\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionKey {\n connection: Option>,\n collection_name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nstruct VectorDef {\n vector_size: usize,\n metric: spec::VectorSimilarityMetric,\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n multi_vector_comparator: Option,\n}\n#[derive(Debug, Clone, Serialize, Deserialize)]\nstruct SetupState {\n #[serde(default)]\n vectors: BTreeMap,\n\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n unsupported_vector_fields: Vec<(String, 
ValueType)>,\n}\n\n#[derive(Debug)]\nstruct SetupStatus {\n delete_collection: bool,\n add_collection: Option,\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if self.delete_collection {\n result.push(setup::ChangeDescription::Action(\n \"Delete collection\".to_string(),\n ));\n }\n if let Some(add_collection) = &self.add_collection {\n let vector_descriptions = add_collection\n .vectors\n .iter()\n .map(|(name, vector_def)| {\n format!(\n \"{}[{}], {}\",\n name, vector_def.vector_size, vector_def.metric\n )\n })\n .collect::>()\n .join(\"; \");\n result.push(setup::ChangeDescription::Action(format!(\n \"Create collection{}\",\n if vector_descriptions.is_empty() {\n \"\".to_string()\n } else {\n format!(\" with vectors: {vector_descriptions}\")\n }\n )));\n for (name, schema) in add_collection.unsupported_vector_fields.iter() {\n result.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by Qdrant. 
It will be stored in payload.\",\n name, schema\n )));\n }\n }\n result\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n match (self.delete_collection, self.add_collection.is_some()) {\n (false, false) => setup::SetupChangeType::NoChange,\n (false, true) => setup::SetupChangeType::Create,\n (true, false) => setup::SetupChangeType::Delete,\n (true, true) => setup::SetupChangeType::Update,\n }\n }\n}\n\nimpl SetupStatus {\n async fn apply_delete(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if self.delete_collection {\n qdrant_client.delete_collection(collection_name).await?;\n }\n Ok(())\n }\n\n async fn apply_create(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if let Some(add_collection) = &self.add_collection {\n let mut builder = CreateCollectionBuilder::new(collection_name);\n if !add_collection.vectors.is_empty() {\n let mut vectors_config = VectorsConfigBuilder::default();\n for (name, vector_def) in add_collection.vectors.iter() {\n let mut params = VectorParamsBuilder::new(\n vector_def.vector_size as u64,\n embedding_metric_to_qdrant(vector_def.metric)?,\n );\n if let Some(multi_vector_comparator) = &vector_def.multi_vector_comparator {\n params = params.multivector_config(MultiVectorConfigBuilder::new(\n MultiVectorComparator::from_str_name(multi_vector_comparator)\n .ok_or_else(|| {\n anyhow!(\n \"unrecognized multi vector comparator: {}\",\n multi_vector_comparator\n )\n })?,\n ));\n }\n vectors_config.add_named_vector_params(name, params);\n }\n builder = builder.vectors_config(vectors_config);\n }\n qdrant_client.create_collection(builder).await?;\n }\n Ok(())\n }\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n qdrant_client: Arc,\n collection_name: String,\n fields_info: Vec,\n}\n\nimpl ExportContext {\n async fn apply_mutation(&self, mutation: 
ExportTargetMutation) -> Result<()> {\n let mut points: Vec = Vec::with_capacity(mutation.upserts.len());\n for upsert in mutation.upserts.iter() {\n let point_id = key_to_point_id(&upsert.key)?;\n let (payload, vectors) = values_to_payload(&upsert.value.fields, &self.fields_info)?;\n\n points.push(PointStruct::new(point_id, vectors, payload));\n }\n\n if !points.is_empty() {\n self.qdrant_client\n .upsert_points(UpsertPointsBuilder::new(&self.collection_name, points).wait(true))\n .await?;\n }\n\n let ids = mutation\n .deletes\n .iter()\n .map(|deletion| key_to_point_id(&deletion.key))\n .collect::>>()?;\n\n if !ids.is_empty() {\n self.qdrant_client\n .delete_points(\n DeletePointsBuilder::new(&self.collection_name)\n .points(PointsIdsList { ids })\n .wait(true),\n )\n .await?;\n }\n\n Ok(())\n }\n}\nfn key_to_point_id(key_value: &KeyValue) -> Result {\n let point_id = match key_value {\n KeyValue::Str(v) => PointId::from(v.to_string()),\n KeyValue::Int64(v) => PointId::from(*v as u64),\n KeyValue::Uuid(v) => PointId::from(v.to_string()),\n e => bail!(\"Invalid Qdrant point ID: {e}\"),\n };\n\n Ok(point_id)\n}\n\nfn values_to_payload(\n value_fields: &[Value],\n fields_info: &[FieldInfo],\n) -> Result<(HashMap, NamedVectors)> {\n let mut payload = HashMap::with_capacity(value_fields.len());\n let mut vectors = NamedVectors::default();\n\n for (value, field_info) in value_fields.iter().zip(fields_info.iter()) {\n let field_name = &field_info.field_schema.name;\n\n match &field_info.vector_shape {\n Some(vector_shape) => {\n if value.is_null() {\n continue;\n }\n let vector: QdrantVector = match value {\n Value::Basic(basic_value) => match vector_shape {\n VectorShape::Vector(_) => encode_dense_vector(&basic_value)?.into(),\n VectorShape::MultiVector(_) => {\n encode_multi_dense_vector(&basic_value)?.into()\n }\n },\n _ => {\n bail!(\"Expected a vector field, got {:?}\", value);\n }\n };\n vectors = vectors.add_vector(field_name.clone(), vector);\n }\n None => {\n 
let json_value = serde_json::to_value(TypedValue {\n t: &field_info.field_schema.value_type.typ,\n v: value,\n })?;\n payload.insert(field_name.clone(), json_value.into());\n }\n }\n }\n\n Ok((payload, vectors))\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\n#[derive(Default)]\nstruct Factory {\n qdrant_clients: Mutex>, Arc>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionId {\n collection_name: String,\n}\n\nimpl Display for CollectionId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.collection_name)?;\n Ok(())\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = CollectionKey;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Qdrant\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(CollectionKey, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n if d.key_fields_schema.len() != 1 {\n api_bail!(\n \"Expected one primary key field for the point ID. 
Got {}.\",\n d.key_fields_schema.len()\n )\n }\n\n let mut fields_info = Vec::::new();\n let mut vector_def = BTreeMap::::new();\n let mut unsupported_vector_fields = Vec::<(String, ValueType)>::new();\n\n for field in d.value_fields_schema.iter() {\n let vector_shape = parse_vector_shape(&field.value_type.typ);\n if let Some(vector_shape) = &vector_shape {\n vector_def.insert(\n field.name.clone(),\n VectorDef {\n vector_size: vector_shape.vector_size(),\n metric: DEFAULT_VECTOR_SIMILARITY_METRIC,\n multi_vector_comparator: vector_shape.multi_vector_comparator().map(|s| s.as_str_name().to_string()),\n },\n );\n } else if matches!(\n &field.value_type.typ,\n schema::ValueType::Basic(schema::BasicValueType::Vector(_))\n ) {\n // This is a vector field but not supported by Qdrant\n unsupported_vector_fields.push((field.name.clone(), field.value_type.typ.clone()));\n }\n fields_info.push(FieldInfo {\n field_schema: field.clone(),\n vector_shape,\n });\n }\n\n let mut specified_vector_fields = HashSet::new();\n for vector_index in d.index_options.vector_indexes {\n match vector_def.get_mut(&vector_index.field_name) {\n Some(vector_def) => {\n if specified_vector_fields.insert(vector_index.field_name.clone()) {\n // Validate the metric is supported by Qdrant\n embedding_metric_to_qdrant(vector_index.metric)\n .with_context(||\n format!(\"Parsing vector index metric {} for field `{}`\", vector_index.metric, vector_index.field_name))?;\n vector_def.metric = vector_index.metric;\n } else {\n api_bail!(\"Field `{}` specified more than once in vector index definition\", vector_index.field_name);\n }\n }\n None => {\n if let Some(field) = d.value_fields_schema.iter().find(|f| f.name == vector_index.field_name) {\n api_bail!(\n \"Field `{}` specified in vector index is expected to be a number vector with fixed size, actual type: {}\",\n vector_index.field_name, field.value_type.typ\n );\n } else {\n api_bail!(\"Field `{}` specified in vector index is not found\", 
vector_index.field_name);\n }\n }\n }\n }\n\n let export_context = Arc::new(ExportContext {\n qdrant_client: self\n .get_qdrant_client(&d.spec.connection, &context.auth_registry)?,\n collection_name: d.spec.collection_name.clone(),\n fields_info,\n });\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: Box::pin(async move { Ok(export_context) }),\n setup_key: CollectionKey {\n connection: d.spec.connection,\n collection_name: d.spec.collection_name,\n },\n desired_setup_state: SetupState {\n vectors: vector_def,\n unsupported_vector_fields,\n },\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(match key {\n serde_json::Value::String(s) => {\n // For backward compatibility.\n CollectionKey {\n collection_name: s,\n connection: None,\n }\n }\n _ => serde_json::from_value(key)?,\n })\n }\n\n async fn check_setup_status(\n &self,\n _key: CollectionKey,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let desired_exists = desired.is_some();\n let add_collection = desired.filter(|state| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vectors != state.vectors)\n });\n let delete_collection = existing.possible_versions().next().is_some()\n && (!desired_exists || add_collection.is_some());\n Ok(SetupStatus {\n delete_collection,\n add_collection,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(if desired.vectors == existing.vectors {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n })\n }\n\n fn describe_resource(&self, key: &CollectionKey) -> Result {\n Ok(format!(\n \"Qdrant collection {}{}\",\n key.collection_name,\n key.connection\n .as_ref()\n .map_or_else(|| \"\".to_string(), |auth_entry| format!(\" @ {auth_entry}\"))\n ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: 
Vec>,\n ) -> Result<()> {\n for mutation_w_ctx in mutations.into_iter() {\n mutation_w_ctx\n .export_context\n .apply_mutation(mutation_w_ctx.mutation)\n .await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_delete(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_create(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n Ok(())\n }\n}\n\nimpl Factory {\n fn new() -> Self {\n Self {\n qdrant_clients: Mutex::new(HashMap::new()),\n }\n }\n\n fn get_qdrant_client(\n &self,\n auth_entry: &Option>,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let mut clients = self.qdrant_clients.lock().unwrap();\n if let Some(client) = clients.get(auth_entry) {\n return Ok(client.clone());\n }\n\n let spec = auth_entry.as_ref().map_or_else(\n || {\n Ok(ConnectionSpec {\n grpc_url: DEFAULT_URL.to_string(),\n api_key: None,\n })\n },\n |auth_entry| auth_registry.get(auth_entry),\n )?;\n let client = Arc::new(\n Qdrant::from_url(&spec.grpc_url)\n .api_key(spec.api_key)\n .skip_compatibility_check()\n .build()?,\n );\n clients.insert(auth_entry.clone(), client.clone());\n Ok(client)\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory::new().register(registry)\n}\n"], ["/cocoindex/src/setup/states.rs", "/// Concepts:\n/// - Resource: some setup that needs to be tracked and maintained.\n/// - Setup State: current state of a resource.\n/// - Staging Change: states changes that may not be really applied yet.\n/// - Combined Setup State: Setup State + Staging Change.\n/// - 
Status Check: information about changes that are being applied / need to be applied.\n///\n/// Resource hierarchy:\n/// - [resource: setup metadata table] /// - Flow\n/// - [resource: metadata]\n/// - [resource: tracking table]\n/// - Target\n/// - [resource: target-specific stuff]\nuse crate::prelude::*;\n\nuse indenter::indented;\nuse owo_colors::{AnsiColors, OwoColorize};\nuse std::any::Any;\nuse std::fmt::Debug;\nuse std::fmt::{Display, Write};\nuse std::hash::Hash;\n\nuse super::db_metadata;\nuse crate::execution::db_tracking_setup::{\n self, TrackingTableSetupState, TrackingTableSetupStatus,\n};\n\nconst INDENT: &str = \" \";\n\npub trait StateMode: Clone + Copy {\n type State: Debug + Clone;\n type DefaultState: Debug + Clone + Default;\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct DesiredMode;\nimpl StateMode for DesiredMode {\n type State = T;\n type DefaultState = T;\n}\n\n#[derive(Debug, Clone)]\npub struct CombinedState {\n pub current: Option,\n pub staging: Vec>,\n /// Legacy state keys that no longer identical to the latest serialized form (usually caused by code change).\n /// They will be deleted when the next change is applied.\n pub legacy_state_key: Option,\n}\n\nimpl CombinedState {\n pub fn from_desired(desired: T) -> Self {\n Self {\n current: Some(desired),\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn from_change(prev: Option>, change: Option>) -> Self\n where\n T: Clone,\n {\n Self {\n current: match change {\n Some(Some(state)) => Some(state.clone()),\n Some(None) => None,\n None => prev.and_then(|v| v.current),\n },\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn possible_versions(&self) -> impl Iterator {\n self.current\n .iter()\n .chain(self.staging.iter().flat_map(|s| s.state().into_iter()))\n }\n\n pub fn always_exists(&self) -> bool {\n self.current.is_some() && self.staging.iter().all(|s| !s.is_delete())\n }\n\n pub fn legacy_values &V>(\n &self,\n desired: Option<&T>,\n f: F,\n ) -> 
BTreeSet<&V> {\n let desired_value = desired.map(&f);\n self.possible_versions()\n .map(f)\n .filter(|v| Some(*v) != desired_value)\n .collect()\n }\n}\n\nimpl Default for CombinedState {\n fn default() -> Self {\n Self {\n current: None,\n staging: vec![],\n legacy_state_key: None,\n }\n }\n}\n\nimpl PartialEq for CombinedState {\n fn eq(&self, other: &T) -> bool {\n self.staging.is_empty() && self.current.as_ref() == Some(other)\n }\n}\n\n#[derive(Clone, Copy)]\npub struct ExistingMode;\nimpl StateMode for ExistingMode {\n type State = CombinedState;\n type DefaultState = CombinedState;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub enum StateChange {\n Upsert(State),\n Delete,\n}\n\nimpl StateChange {\n pub fn is_delete(&self) -> bool {\n matches!(self, StateChange::Delete)\n }\n\n pub fn desired_state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n\n pub fn state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct SourceSetupState {\n pub source_id: i32,\n pub key_schema: schema::ValueType,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct ResourceIdentifier {\n pub key: serde_json::Value,\n pub target_kind: String,\n}\n\nimpl Display for ResourceIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}:{}\", self.target_kind, self.key)\n }\n}\n\n/// Common state (i.e. 
not specific to a target kind) for a target.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupStateCommon {\n pub target_id: i32,\n pub schema_version_id: i32,\n pub max_schema_version_id: i32,\n #[serde(default)]\n pub setup_by_user: bool,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupState {\n pub common: TargetSetupStateCommon,\n\n pub state: serde_json::Value,\n}\n\nimpl TargetSetupState {\n pub fn state_unless_setup_by_user(self) -> Option {\n (!self.common.setup_by_user).then_some(self.state)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]\npub struct FlowSetupMetadata {\n pub last_source_id: i32,\n pub last_target_id: i32,\n pub sources: BTreeMap,\n}\n\n#[derive(Debug, Clone)]\npub struct FlowSetupState {\n // The version number for the flow, last seen in the metadata table.\n pub seen_flow_metadata_version: Option,\n pub metadata: Mode::DefaultState,\n pub tracking_table: Mode::State,\n pub targets: IndexMap>,\n}\n\nimpl Default for FlowSetupState {\n fn default() -> Self {\n Self {\n seen_flow_metadata_version: None,\n metadata: Default::default(),\n tracking_table: Default::default(),\n targets: IndexMap::new(),\n }\n }\n}\n\nimpl PartialEq for FlowSetupState {\n fn eq(&self, other: &Self) -> bool {\n self.metadata == other.metadata\n && self.tracking_table == other.tracking_table\n && self.targets == other.targets\n }\n}\n\n#[derive(Debug, Clone)]\npub struct AllSetupStates {\n pub has_metadata_table: bool,\n pub flows: BTreeMap>,\n}\n\nimpl Default for AllSetupStates {\n fn default() -> Self {\n Self {\n has_metadata_table: false,\n flows: BTreeMap::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum SetupChangeType {\n NoChange,\n Create,\n Update,\n Delete,\n Invalid,\n}\n\npub enum ChangeDescription {\n Action(String),\n Note(String),\n}\n\npub trait ResourceSetupStatus: Send + Sync + Debug + Any + 
'static {\n fn describe_changes(&self) -> Vec;\n\n fn change_type(&self) -> SetupChangeType;\n}\n\nimpl ResourceSetupStatus for Box {\n fn describe_changes(&self) -> Vec {\n self.as_ref().describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.as_ref().change_type()\n }\n}\n\nimpl ResourceSetupStatus for std::convert::Infallible {\n fn describe_changes(&self) -> Vec {\n unreachable!()\n }\n\n fn change_type(&self) -> SetupChangeType {\n unreachable!()\n }\n}\n\n#[derive(Debug)]\npub struct ResourceSetupInfo {\n pub key: K,\n pub state: Option,\n pub description: String,\n\n /// If `None`, the resource is managed by users.\n pub setup_status: Option,\n\n pub legacy_key: Option,\n}\n\nimpl std::fmt::Display for ResourceSetupInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let status_code = match self.setup_status.as_ref().map(|c| c.change_type()) {\n Some(SetupChangeType::NoChange) => \"READY\",\n Some(SetupChangeType::Create) => \"TO CREATE\",\n Some(SetupChangeType::Update) => \"TO UPDATE\",\n Some(SetupChangeType::Delete) => \"TO DELETE\",\n Some(SetupChangeType::Invalid) => \"INVALID\",\n None => \"USER MANAGED\",\n };\n let status_str = format!(\"[ {status_code:^9} ]\");\n let status_full = status_str.color(AnsiColors::Cyan);\n let desc_colored = &self.description;\n writeln!(f, \"{status_full} {desc_colored}\")?;\n if let Some(setup_status) = &self.setup_status {\n let changes = setup_status.describe_changes();\n if !changes.is_empty() {\n let mut f = indented(f).with_str(INDENT);\n writeln!(f, \"\")?;\n for change in changes {\n match change {\n ChangeDescription::Action(action) => {\n writeln!(\n f,\n \"{} {}\",\n \"TODO:\".color(AnsiColors::BrightBlack).bold(),\n action.color(AnsiColors::BrightBlack)\n )?;\n }\n ChangeDescription::Note(note) => {\n writeln!(\n f,\n \"{} {}\",\n \"NOTE:\".color(AnsiColors::Yellow).bold(),\n note.color(AnsiColors::Yellow)\n )?;\n }\n }\n }\n writeln!(f)?;\n }\n }\n Ok(())\n 
}\n}\n\nimpl ResourceSetupInfo {\n pub fn is_up_to_date(&self) -> bool {\n self.setup_status\n .as_ref()\n .is_none_or(|c| c.change_type() == SetupChangeType::NoChange)\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum ObjectStatus {\n Invalid,\n New,\n Existing,\n Deleted,\n}\n\npub trait ObjectSetupStatus {\n fn status(&self) -> Option;\n fn is_up_to_date(&self) -> bool;\n}\n\n#[derive(Debug)]\npub struct FlowSetupStatus {\n pub status: Option,\n pub seen_flow_metadata_version: Option,\n\n pub metadata_change: Option>,\n\n pub tracking_table:\n Option>,\n pub target_resources:\n Vec>>,\n\n pub unknown_resources: Vec,\n}\n\nimpl ObjectSetupStatus for FlowSetupStatus {\n fn status(&self) -> Option {\n self.status\n }\n\n fn is_up_to_date(&self) -> bool {\n self.metadata_change.is_none()\n && self\n .tracking_table\n .as_ref()\n .is_none_or(|t| t.is_up_to_date())\n && self\n .target_resources\n .iter()\n .all(|target| target.is_up_to_date())\n }\n}\n\n#[derive(Debug)]\npub struct GlobalSetupStatus {\n pub metadata_table: ResourceSetupInfo<(), (), db_metadata::MetadataTableSetup>,\n}\n\nimpl GlobalSetupStatus {\n pub fn from_setup_states(setup_states: &AllSetupStates) -> Self {\n Self {\n metadata_table: db_metadata::MetadataTableSetup {\n metadata_table_missing: !setup_states.has_metadata_table,\n }\n .into_setup_info(),\n }\n }\n\n pub fn is_up_to_date(&self) -> bool {\n self.metadata_table.is_up_to_date()\n }\n}\n\npub struct ObjectSetupStatusCode<'a, Status: ObjectSetupStatus>(&'a Status);\nimpl std::fmt::Display for ObjectSetupStatusCode<'_, Status> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let Some(status) = self.0.status() else {\n return Ok(());\n };\n write!(\n f,\n \"[ {:^9} ]\",\n match status {\n ObjectStatus::New => \"TO CREATE\",\n ObjectStatus::Existing =>\n if self.0.is_up_to_date() {\n \"READY\"\n } else {\n \"TO UPDATE\"\n },\n ObjectStatus::Deleted => \"TO DELETE\",\n 
ObjectStatus::Invalid => \"INVALID\",\n }\n )\n }\n}\n\nimpl std::fmt::Display for GlobalSetupStatus {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n writeln!(f, \"{}\", self.metadata_table)\n }\n}\n\npub struct FormattedFlowSetupStatus<'a>(pub &'a str, pub &'a FlowSetupStatus);\n\nimpl std::fmt::Display for FormattedFlowSetupStatus<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let flow_ssc = self.1;\n if flow_ssc.status.is_none() {\n return Ok(());\n }\n\n writeln!(\n f,\n \"{} Flow: {}\",\n ObjectSetupStatusCode(flow_ssc)\n .to_string()\n .color(AnsiColors::Cyan),\n self.0\n )?;\n\n let mut f = indented(f).with_str(INDENT);\n if let Some(tracking_table) = &flow_ssc.tracking_table {\n write!(f, \"{tracking_table}\")?;\n }\n for target_resource in &flow_ssc.target_resources {\n write!(f, \"{target_resource}\")?;\n }\n for resource in &flow_ssc.unknown_resources {\n writeln!(f, \"[ UNKNOWN ] {resource}\")?;\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/sources/amazon_s3.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse aws_config::BehaviorVersion;\nuse aws_sdk_s3::Client;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\nuse urlencoding;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n/// Decode a form-encoded URL string, treating '+' as spaces\nfn decode_form_encoded_url(input: &str) -> Result> {\n // Replace '+' with spaces (form encoding convention), then decode\n // This handles both cases correctly:\n // - Literal '+' would be encoded as '%2B' and remain unchanged after replacement\n // - Space would be encoded as '+' and become ' ' after replacement\n let with_spaces = input.replace(\"+\", \" \");\n Ok(urlencoding::decode(&with_spaces)?.into())\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n sqs_queue_url: Option,\n}\n\nstruct 
SqsContext {\n client: aws_sdk_sqs::Client,\n queue_url: String,\n}\n\nimpl SqsContext {\n async fn delete_message(&self, receipt_handle: String) -> Result<()> {\n self.client\n .delete_message()\n .queue_url(&self.queue_url)\n .receipt_handle(receipt_handle)\n .send()\n .await?;\n Ok(())\n }\n}\n\nstruct Executor {\n client: Client,\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n sqs_context: Option>,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &aws_sdk_s3::primitives::DateTime) -> Ordinal {\n Ordinal(Some((dt.as_nanos() / 1000) as i64))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! 
{\n let mut continuation_token = None;\n loop {\n let mut req = self.client\n .list_objects_v2()\n .bucket(&self.bucket_name);\n if let Some(ref p) = self.prefix {\n req = req.prefix(p);\n }\n if let Some(ref token) = continuation_token {\n req = req.continuation_token(token);\n }\n let resp = req.send().await?;\n if let Some(contents) = &resp.contents {\n let mut batch = Vec::new();\n for obj in contents {\n if let Some(key) = obj.key() {\n // Only include files (not folders)\n if key.ends_with('/') { continue; }\n let include = self.included_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(true);\n let exclude = self.excluded_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(false);\n if include && !exclude {\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.to_string().into()),\n ordinal: obj.last_modified().map(datetime_to_ordinal),\n });\n }\n }\n }\n if !batch.is_empty() {\n yield batch;\n }\n }\n if resp.is_truncated == Some(true) {\n continuation_token = resp.next_continuation_token.clone().map(|s| s.to_string());\n } else {\n break;\n }\n }\n }.boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let resp = self\n .client\n .get_object()\n .bucket(&self.bucket_name)\n .key(key_str.as_ref())\n .send()\n .await;\n let obj = match resp {\n Err(e) if e.as_service_error().is_some_and(|e| e.is_no_such_key()) => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n r => r?,\n };\n let ordinal = if options.include_ordinal {\n obj.last_modified().map(datetime_to_ordinal)\n } else {\n None\n };\n let value = if options.include_value {\n let bytes = 
obj.body.collect().await?.into_bytes();\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes.to_vec())\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let sqs_context = if let Some(sqs_context) = &self.sqs_context {\n sqs_context\n } else {\n return Ok(None);\n };\n let stream = stream! {\n loop {\n match self.poll_sqs(sqs_context).await {\n Ok(messages) => {\n for message in messages {\n yield Ok(message);\n }\n }\n Err(e) => {\n yield Err(e);\n }\n };\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventNotification {\n #[serde(default, rename = \"Records\")]\n pub records: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventRecord {\n #[serde(rename = \"eventName\")]\n pub event_name: String,\n pub s3: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Entity {\n pub bucket: S3Bucket,\n pub object: S3Object,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Bucket {\n pub name: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Object {\n pub key: String,\n}\n\nimpl Executor {\n async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n 
.is_none_or(|prefix| s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AmazonS3\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n let config = aws_config::load_defaults(BehaviorVersion::latest()).await;\n Ok(Box::new(Executor {\n client: Client::new(&config),\n bucket_name: spec.bucket_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: 
spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n sqs_context: spec.sqs_queue_url.map(|url| {\n Arc::new(SqsContext {\n client: aws_sdk_sqs::Client::new(&config),\n queue_url: url,\n })\n }),\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/base/value.rs", "use super::schema::*;\nuse crate::base::duration::parse_duration;\nuse crate::prelude::invariance_violation;\nuse crate::{api_bail, api_error};\nuse anyhow::Result;\nuse base64::prelude::*;\nuse bytes::Bytes;\nuse chrono::Offset;\nuse log::warn;\nuse serde::{\n Deserialize, Serialize,\n de::{SeqAccess, Visitor},\n ser::{SerializeMap, SerializeSeq, SerializeTuple},\n};\nuse std::{collections::BTreeMap, ops::Deref, sync::Arc};\n\npub trait EstimatedByteSize: Sized {\n fn estimated_detached_byte_size(&self) -> usize;\n\n fn estimated_byte_size(&self) -> usize {\n self.estimated_detached_byte_size() + std::mem::size_of::()\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct RangeValue {\n pub start: usize,\n pub end: usize,\n}\n\nimpl RangeValue {\n pub fn new(start: usize, end: usize) -> Self {\n RangeValue { start, end }\n }\n\n pub fn len(&self) -> usize {\n self.end - self.start\n }\n\n pub fn extract_str<'s>(&self, s: &'s (impl AsRef + ?Sized)) -> &'s str {\n let s = s.as_ref();\n &s[self.start..self.end]\n }\n}\n\nimpl Serialize for RangeValue {\n fn serialize(&self, serializer: S) -> Result {\n let mut tuple = serializer.serialize_tuple(2)?;\n tuple.serialize_element(&self.start)?;\n tuple.serialize_element(&self.end)?;\n tuple.end()\n }\n}\n\nimpl<'de> Deserialize<'de> for RangeValue {\n fn deserialize>(deserializer: D) -> Result {\n struct RangeVisitor;\n\n impl<'de> Visitor<'de> for RangeVisitor {\n type 
Value = RangeValue;\n\n fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n formatter.write_str(\"a tuple of two u64\")\n }\n\n fn visit_seq(self, mut seq: V) -> Result\n where\n V: SeqAccess<'de>,\n {\n let start = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing begin\"))?;\n let end = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing end\"))?;\n Ok(RangeValue { start, end })\n }\n }\n deserializer.deserialize_tuple(2, RangeVisitor)\n }\n}\n\n/// Value of key.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]\npub enum KeyValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Struct(Vec),\n}\n\nimpl From for KeyValue {\n fn from(value: Bytes) -> Self {\n KeyValue::Bytes(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Arc) -> Self {\n KeyValue::Str(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: String) -> Self {\n KeyValue::Str(Arc::from(value))\n }\n}\n\nimpl From for KeyValue {\n fn from(value: bool) -> Self {\n KeyValue::Bool(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: i64) -> Self {\n KeyValue::Int64(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: RangeValue) -> Self {\n KeyValue::Range(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: uuid::Uuid) -> Self {\n KeyValue::Uuid(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: chrono::NaiveDate) -> Self {\n KeyValue::Date(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Struct(value)\n }\n}\n\nimpl serde::Serialize for KeyValue {\n fn serialize(&self, serializer: S) -> Result {\n Value::from(self.clone()).serialize(serializer)\n }\n}\n\nimpl std::fmt::Display for KeyValue {\n fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n KeyValue::Bytes(v) => write!(f, \"{}\", BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => write!(f, \"\\\"{}\\\"\", v.escape_default()),\n KeyValue::Bool(v) => write!(f, \"{v}\"),\n KeyValue::Int64(v) => write!(f, \"{v}\"),\n KeyValue::Range(v) => write!(f, \"[{}, {})\", v.start, v.end),\n KeyValue::Uuid(v) => write!(f, \"{v}\"),\n KeyValue::Date(v) => write!(f, \"{v}\"),\n KeyValue::Struct(v) => {\n write!(\n f,\n \"[{}]\",\n v.iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\", \")\n )\n }\n }\n }\n}\n\nimpl KeyValue {\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n let value = if fields_schema.len() == 1 {\n Value::from_json(value, &fields_schema[0].value_type.typ)?\n } else {\n let field_values: FieldValues = FieldValues::from_json(value, fields_schema)?;\n Value::Struct(field_values)\n };\n value.as_key()\n }\n\n pub fn from_values<'a>(values: impl ExactSizeIterator) -> Result {\n let key = if values.len() == 1 {\n let mut values = values;\n values.next().ok_or_else(invariance_violation)?.as_key()?\n } else {\n KeyValue::Struct(values.map(|v| v.as_key()).collect::>>()?)\n };\n Ok(key)\n }\n\n pub fn fields_iter(&self, num_fields: usize) -> Result> {\n let slice = if num_fields == 1 {\n std::slice::from_ref(self)\n } else {\n match self {\n KeyValue::Struct(v) => v,\n _ => api_bail!(\"Invalid key value type\"),\n }\n };\n Ok(slice.iter())\n }\n\n fn parts_from_str(\n values_iter: &mut impl Iterator,\n schema: &ValueType,\n ) -> Result {\n let result = match schema {\n ValueType::Basic(basic_type) => {\n let v = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n match basic_type {\n BasicValueType::Bytes => {\n KeyValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n BasicValueType::Str => KeyValue::Str(Arc::from(v)),\n BasicValueType::Bool => KeyValue::Bool(v.parse()?),\n BasicValueType::Int64 => 
KeyValue::Int64(v.parse()?),\n BasicValueType::Range => {\n let v2 = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n KeyValue::Range(RangeValue {\n start: v.parse()?,\n end: v2.parse()?,\n })\n }\n BasicValueType::Uuid => KeyValue::Uuid(v.parse()?),\n BasicValueType::Date => KeyValue::Date(v.parse()?),\n schema => api_bail!(\"Invalid key type {schema}\"),\n }\n }\n ValueType::Struct(s) => KeyValue::Struct(\n s.fields\n .iter()\n .map(|f| KeyValue::parts_from_str(values_iter, &f.value_type.typ))\n .collect::>>()?,\n ),\n _ => api_bail!(\"Invalid key type {schema}\"),\n };\n Ok(result)\n }\n\n fn parts_to_strs(&self, output: &mut Vec) {\n match self {\n KeyValue::Bytes(v) => output.push(BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => output.push(v.to_string()),\n KeyValue::Bool(v) => output.push(v.to_string()),\n KeyValue::Int64(v) => output.push(v.to_string()),\n KeyValue::Range(v) => {\n output.push(v.start.to_string());\n output.push(v.end.to_string());\n }\n KeyValue::Uuid(v) => output.push(v.to_string()),\n KeyValue::Date(v) => output.push(v.to_string()),\n KeyValue::Struct(v) => {\n for part in v {\n part.parts_to_strs(output);\n }\n }\n }\n }\n\n pub fn from_strs(value: impl IntoIterator, schema: &ValueType) -> Result {\n let mut values_iter = value.into_iter();\n let result = Self::parts_from_str(&mut values_iter, schema)?;\n if values_iter.next().is_some() {\n api_bail!(\"Key parts more than expected\");\n }\n Ok(result)\n }\n\n pub fn to_strs(&self) -> Vec {\n let mut output = Vec::with_capacity(self.num_parts());\n self.parts_to_strs(&mut output);\n output\n }\n\n pub fn kind_str(&self) -> &'static str {\n match self {\n KeyValue::Bytes(_) => \"bytes\",\n KeyValue::Str(_) => \"str\",\n KeyValue::Bool(_) => \"bool\",\n KeyValue::Int64(_) => \"int64\",\n KeyValue::Range { .. 
} => \"range\",\n KeyValue::Uuid(_) => \"uuid\",\n KeyValue::Date(_) => \"date\",\n KeyValue::Struct(_) => \"struct\",\n }\n }\n\n pub fn bytes_value(&self) -> Result<&Bytes> {\n match self {\n KeyValue::Bytes(v) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn str_value(&self) -> Result<&Arc> {\n match self {\n KeyValue::Str(v) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn bool_value(&self) -> Result {\n match self {\n KeyValue::Bool(v) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn int64_value(&self) -> Result {\n match self {\n KeyValue::Int64(v) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn range_value(&self) -> Result {\n match self {\n KeyValue::Range(v) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn uuid_value(&self) -> Result {\n match self {\n KeyValue::Uuid(v) => Ok(*v),\n _ => anyhow::bail!(\"expected uuid value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn date_value(&self) -> Result {\n match self {\n KeyValue::Date(v) => Ok(*v),\n _ => anyhow::bail!(\"expected date value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn struct_value(&self) -> Result<&Vec> {\n match self {\n KeyValue::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn num_parts(&self) -> usize {\n match self {\n KeyValue::Range(_) => 2,\n KeyValue::Struct(v) => v.iter().map(|v| v.num_parts()).sum(),\n _ => 1,\n }\n }\n\n fn estimated_detached_byte_size(&self) -> usize {\n match self {\n KeyValue::Bytes(v) => v.len(),\n KeyValue::Str(v) => v.len(),\n KeyValue::Struct(v) => {\n v.iter()\n .map(KeyValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n KeyValue::Bool(_)\n | 
KeyValue::Int64(_)\n | KeyValue::Range(_)\n | KeyValue::Uuid(_)\n | KeyValue::Date(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub enum BasicValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Float32(f32),\n Float64(f64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Time(chrono::NaiveTime),\n LocalDateTime(chrono::NaiveDateTime),\n OffsetDateTime(chrono::DateTime),\n TimeDelta(chrono::Duration),\n Json(Arc),\n Vector(Arc<[BasicValue]>),\n UnionVariant {\n tag_id: usize,\n value: Box,\n },\n}\n\nimpl From for BasicValue {\n fn from(value: Bytes) -> Self {\n BasicValue::Bytes(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Arc) -> Self {\n BasicValue::Str(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: String) -> Self {\n BasicValue::Str(Arc::from(value))\n }\n}\n\nimpl From for BasicValue {\n fn from(value: bool) -> Self {\n BasicValue::Bool(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: i64) -> Self {\n BasicValue::Int64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f32) -> Self {\n BasicValue::Float32(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f64) -> Self {\n BasicValue::Float64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: uuid::Uuid) -> Self {\n BasicValue::Uuid(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDate) -> Self {\n BasicValue::Date(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveTime) -> Self {\n BasicValue::Time(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDateTime) -> Self {\n BasicValue::LocalDateTime(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: chrono::DateTime) -> Self {\n BasicValue::OffsetDateTime(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::Duration) -> Self 
{\n BasicValue::TimeDelta(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: serde_json::Value) -> Self {\n BasicValue::Json(Arc::from(value))\n }\n}\n\nimpl> From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Vector(Arc::from(\n value.into_iter().map(|v| v.into()).collect::>(),\n ))\n }\n}\n\nimpl BasicValue {\n pub fn into_key(self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v),\n BasicValue::Str(v) => KeyValue::Str(v),\n BasicValue::Bool(v) => KeyValue::Bool(v),\n BasicValue::Int64(v) => KeyValue::Int64(v),\n BasicValue::Range(v) => KeyValue::Range(v),\n BasicValue::Uuid(v) => KeyValue::Uuid(v),\n BasicValue::Date(v) => KeyValue::Date(v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. } => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v.clone()),\n BasicValue::Str(v) => KeyValue::Str(v.clone()),\n BasicValue::Bool(v) => KeyValue::Bool(*v),\n BasicValue::Int64(v) => KeyValue::Int64(*v),\n BasicValue::Range(v) => KeyValue::Range(*v),\n BasicValue::Uuid(v) => KeyValue::Uuid(*v),\n BasicValue::Date(v) => KeyValue::Date(*v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. 
} => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match &self {\n BasicValue::Bytes(_) => \"bytes\",\n BasicValue::Str(_) => \"str\",\n BasicValue::Bool(_) => \"bool\",\n BasicValue::Int64(_) => \"int64\",\n BasicValue::Float32(_) => \"float32\",\n BasicValue::Float64(_) => \"float64\",\n BasicValue::Range(_) => \"range\",\n BasicValue::Uuid(_) => \"uuid\",\n BasicValue::Date(_) => \"date\",\n BasicValue::Time(_) => \"time\",\n BasicValue::LocalDateTime(_) => \"local_datetime\",\n BasicValue::OffsetDateTime(_) => \"offset_datetime\",\n BasicValue::TimeDelta(_) => \"timedelta\",\n BasicValue::Json(_) => \"json\",\n BasicValue::Vector(_) => \"vector\",\n BasicValue::UnionVariant { .. } => \"union\",\n }\n }\n\n /// Returns the estimated byte size of the value, for detached data (i.e. allocated on heap).\n fn estimated_detached_byte_size(&self) -> usize {\n fn json_estimated_detached_byte_size(val: &serde_json::Value) -> usize {\n match val {\n serde_json::Value::String(s) => s.len(),\n serde_json::Value::Array(arr) => {\n arr.iter()\n .map(json_estimated_detached_byte_size)\n .sum::()\n + arr.len() * std::mem::size_of::()\n }\n serde_json::Value::Object(map) => map\n .iter()\n .map(|(k, v)| {\n std::mem::size_of::()\n + k.len()\n + json_estimated_detached_byte_size(v)\n })\n .sum(),\n serde_json::Value::Null\n | serde_json::Value::Bool(_)\n | serde_json::Value::Number(_) => 0,\n }\n }\n match self {\n BasicValue::Bytes(v) => v.len(),\n BasicValue::Str(v) => v.len(),\n BasicValue::Json(v) => json_estimated_detached_byte_size(v),\n BasicValue::Vector(v) => {\n v.iter()\n .map(BasicValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n BasicValue::UnionVariant { value, .. 
} => {\n value.estimated_detached_byte_size() + std::mem::size_of::()\n }\n BasicValue::Bool(_)\n | BasicValue::Int64(_)\n | BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Range(_)\n | BasicValue::Uuid(_)\n | BasicValue::Date(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Deserialize)]\npub enum Value {\n #[default]\n Null,\n Basic(BasicValue),\n Struct(FieldValues),\n UTable(Vec),\n KTable(BTreeMap),\n LTable(Vec),\n}\n\nimpl> From for Value {\n fn from(value: T) -> Self {\n Value::Basic(value.into())\n }\n}\n\nimpl From for Value {\n fn from(value: KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v)),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v)),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.into_iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From<&KeyValue> for Value {\n fn from(value: &KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v.clone())),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v.clone())),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(*v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(*v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(*v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(*v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(*v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From for Value {\n fn from(value: FieldValues) -> Self {\n 
Value::Struct(value)\n }\n}\n\nimpl> From> for Value {\n fn from(value: Option) -> Self {\n match value {\n Some(v) => v.into(),\n None => Value::Null,\n }\n }\n}\n\nimpl Value {\n pub fn from_alternative(value: Value) -> Self\n where\n AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .into_iter()\n .map(|v| Value::::from_alternative(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.into_iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.into_iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.into_iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn from_alternative_ref(value: &Value) -> Self\n where\n for<'a> &'a AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v.clone()),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .iter()\n .map(|v| Value::::from_alternative_ref(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn is_null(&self) -> bool {\n matches!(self, Value::Null)\n }\n\n pub fn into_key(self) -> Result {\n let result = match self {\n Value::Basic(v) => v.into_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .into_iter()\n .map(|v| v.into_key())\n .collect::>>()?,\n ),\n Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n Value::Basic(v) => v.as_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .iter()\n .map(|v| v.as_key())\n .collect::>>()?,\n ),\n 
Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match self {\n Value::Null => \"null\",\n Value::Basic(v) => v.kind(),\n Value::Struct(_) => \"Struct\",\n Value::UTable(_) => \"UTable\",\n Value::KTable(_) => \"KTable\",\n Value::LTable(_) => \"LTable\",\n }\n }\n\n pub fn optional(&self) -> Option<&Self> {\n match self {\n Value::Null => None,\n _ => Some(self),\n }\n }\n\n pub fn as_bytes(&self) -> Result<&Bytes> {\n match self {\n Value::Basic(BasicValue::Bytes(v)) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_str(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Str(v)) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_bool(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Bool(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_int64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Int64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float32(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float32(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float32 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_range(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Range(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_json(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Json(v)) => Ok(v),\n _ => anyhow::bail!(\"expected json value, but got {}\", self.kind()),\n }\n }\n\n 
pub fn as_vector(&self) -> Result<&Arc<[BasicValue]>> {\n match self {\n Value::Basic(BasicValue::Vector(v)) => Ok(v),\n _ => anyhow::bail!(\"expected vector value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_struct(&self) -> Result<&FieldValues> {\n match self {\n Value::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind()),\n }\n }\n}\n\nimpl Value {\n pub fn estimated_byte_size(&self) -> usize {\n std::mem::size_of::()\n + match self {\n Value::Null => 0,\n Value::Basic(v) => v.estimated_detached_byte_size(),\n Value::Struct(v) => v.estimated_detached_byte_size(),\n Value::UTable(v) | Value::LTable(v) => {\n v.iter()\n .map(|v| v.estimated_detached_byte_size())\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n Value::KTable(v) => {\n v.iter()\n .map(|(k, v)| {\n k.estimated_detached_byte_size() + v.estimated_detached_byte_size()\n })\n .sum::()\n + v.len() * std::mem::size_of::<(String, ScopeValue)>()\n }\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub struct FieldValues {\n pub fields: Vec>,\n}\n\nimpl EstimatedByteSize for FieldValues {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(Value::::estimated_byte_size)\n .sum::()\n + self.fields.len() * std::mem::size_of::>()\n }\n}\n\nimpl serde::Serialize for FieldValues {\n fn serialize(&self, serializer: S) -> Result {\n self.fields.serialize(serializer)\n }\n}\n\nimpl FieldValues\nwhere\n FieldValues: Into,\n{\n pub fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize(num_fields, Value::::Null);\n Self { fields }\n }\n\n fn from_json_values<'a>(\n fields: impl Iterator,\n ) -> Result {\n Ok(Self {\n fields: fields\n .map(|(s, v)| {\n let value = Value::::from_json(v, &s.value_type.typ)?;\n if value.is_null() && !s.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", s.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n fn 
from_json_object<'a>(\n values: serde_json::Map,\n fields_schema: impl Iterator,\n ) -> Result {\n let mut values = values;\n Ok(Self {\n fields: fields_schema\n .map(|field| {\n let value = match values.get_mut(&field.name) {\n Some(v) => {\n Value::::from_json(std::mem::take(v), &field.value_type.typ)?\n }\n None => Value::::default(),\n };\n if value.is_null() && !field.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", field.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n match value {\n serde_json::Value::Array(v) => {\n if v.len() != fields_schema.len() {\n api_bail!(\"unmatched value length\");\n }\n Self::from_json_values(fields_schema.iter().zip(v))\n }\n serde_json::Value::Object(v) => Self::from_json_object(v, fields_schema.iter()),\n _ => api_bail!(\"invalid value type\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct ScopeValue(pub FieldValues);\n\nimpl EstimatedByteSize for ScopeValue {\n fn estimated_detached_byte_size(&self) -> usize {\n self.0.estimated_detached_byte_size()\n }\n}\n\nimpl Deref for ScopeValue {\n type Target = FieldValues;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl From for ScopeValue {\n fn from(value: FieldValues) -> Self {\n Self(value)\n }\n}\n\nimpl serde::Serialize for BasicValue {\n fn serialize(&self, serializer: S) -> Result {\n match self {\n BasicValue::Bytes(v) => serializer.serialize_str(&BASE64_STANDARD.encode(v)),\n BasicValue::Str(v) => serializer.serialize_str(v),\n BasicValue::Bool(v) => serializer.serialize_bool(*v),\n BasicValue::Int64(v) => serializer.serialize_i64(*v),\n BasicValue::Float32(v) => serializer.serialize_f32(*v),\n BasicValue::Float64(v) => serializer.serialize_f64(*v),\n BasicValue::Range(v) => v.serialize(serializer),\n BasicValue::Uuid(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Date(v) => 
serializer.serialize_str(&v.to_string()),\n BasicValue::Time(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::LocalDateTime(v) => {\n serializer.serialize_str(&v.format(\"%Y-%m-%dT%H:%M:%S%.6f\").to_string())\n }\n BasicValue::OffsetDateTime(v) => {\n serializer.serialize_str(&v.to_rfc3339_opts(chrono::SecondsFormat::AutoSi, true))\n }\n BasicValue::TimeDelta(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Json(v) => v.serialize(serializer),\n BasicValue::Vector(v) => v.serialize(serializer),\n BasicValue::UnionVariant { tag_id, value } => {\n let mut s = serializer.serialize_tuple(2)?;\n s.serialize_element(tag_id)?;\n s.serialize_element(value)?;\n s.end()\n }\n }\n }\n}\n\nimpl BasicValue {\n pub fn from_json(value: serde_json::Value, schema: &BasicValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::String(v), BasicValueType::Bytes) => {\n BasicValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n (serde_json::Value::String(v), BasicValueType::Str) => BasicValue::Str(Arc::from(v)),\n (serde_json::Value::Bool(v), BasicValueType::Bool) => BasicValue::Bool(v),\n (serde_json::Value::Number(v), BasicValueType::Int64) => BasicValue::Int64(\n v.as_i64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid int64 value {v}\"))?,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float32) => BasicValue::Float32(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp32 value {v}\"))?\n as f32,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float64) => BasicValue::Float64(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp64 value {v}\"))?,\n ),\n (v, BasicValueType::Range) => BasicValue::Range(serde_json::from_value(v)?),\n (serde_json::Value::String(v), BasicValueType::Uuid) => BasicValue::Uuid(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Date) => BasicValue::Date(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Time) => BasicValue::Time(v.parse()?),\n 
(serde_json::Value::String(v), BasicValueType::LocalDateTime) => {\n BasicValue::LocalDateTime(v.parse()?)\n }\n (serde_json::Value::String(v), BasicValueType::OffsetDateTime) => {\n match chrono::DateTime::parse_from_rfc3339(&v) {\n Ok(dt) => BasicValue::OffsetDateTime(dt),\n Err(e) => {\n if let Ok(dt) = v.parse::() {\n warn!(\"Datetime without timezone offset, assuming UTC\");\n BasicValue::OffsetDateTime(chrono::DateTime::from_naive_utc_and_offset(\n dt,\n chrono::Utc.fix(),\n ))\n } else {\n Err(e)?\n }\n }\n }\n }\n (serde_json::Value::String(v), BasicValueType::TimeDelta) => {\n BasicValue::TimeDelta(parse_duration(&v)?)\n }\n (v, BasicValueType::Json) => BasicValue::Json(Arc::from(v)),\n (\n serde_json::Value::Array(v),\n BasicValueType::Vector(VectorTypeSchema { element_type, .. }),\n ) => {\n let vec = v\n .into_iter()\n .map(|v| BasicValue::from_json(v, element_type))\n .collect::>>()?;\n BasicValue::Vector(Arc::from(vec))\n }\n (v, BasicValueType::Union(typ)) => {\n let arr = match v {\n serde_json::Value::Array(arr) => arr,\n _ => anyhow::bail!(\"Invalid JSON value for union, expect array\"),\n };\n\n if arr.len() != 2 {\n anyhow::bail!(\n \"Invalid union tuple: expect 2 values, received {}\",\n arr.len()\n );\n }\n\n let mut obj_iter = arr.into_iter();\n\n // Take first element\n let tag_id = obj_iter\n .next()\n .and_then(|value| value.as_u64().map(|num_u64| num_u64 as usize))\n .unwrap();\n\n // Take second element\n let value = obj_iter.next().unwrap();\n\n let cur_type = typ\n .types\n .get(tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"No type in `tag_id` \\\"{tag_id}\\\" found\"))?;\n\n BasicValue::UnionVariant {\n tag_id,\n value: Box::new(BasicValue::from_json(value, cur_type)?),\n }\n }\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\nstruct TableEntry<'a>(&'a KeyValue, &'a ScopeValue);\n\nimpl serde::Serialize for Value {\n fn serialize(&self, serializer: S) 
-> Result {\n match self {\n Value::Null => serializer.serialize_none(),\n Value::Basic(v) => v.serialize(serializer),\n Value::Struct(v) => v.serialize(serializer),\n Value::UTable(v) => v.serialize(serializer),\n Value::KTable(m) => {\n let mut seq = serializer.serialize_seq(Some(m.len()))?;\n for (k, v) in m.iter() {\n seq.serialize_element(&TableEntry(k, v))?;\n }\n seq.end()\n }\n Value::LTable(v) => v.serialize(serializer),\n }\n }\n}\n\nimpl serde::Serialize for TableEntry<'_> {\n fn serialize(&self, serializer: S) -> Result {\n let &TableEntry(key, value) = self;\n let mut seq = serializer.serialize_seq(Some(value.0.fields.len() + 1))?;\n seq.serialize_element(key)?;\n for item in value.0.fields.iter() {\n seq.serialize_element(item)?;\n }\n seq.end()\n }\n}\n\nimpl Value\nwhere\n FieldValues: Into,\n{\n pub fn from_json(value: serde_json::Value, schema: &ValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::Null, _) => Value::::Null,\n (v, ValueType::Basic(t)) => Value::::Basic(BasicValue::from_json(v, t)?),\n (v, ValueType::Struct(s)) => {\n Value::::Struct(FieldValues::::from_json(v, &s.fields)?)\n }\n (serde_json::Value::Array(v), ValueType::Table(s)) => match s.kind {\n TableKind::UTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n TableKind::KTable => {\n let rows = v\n .into_iter()\n .map(|v| {\n let mut fields_iter = s.row.fields.iter();\n let key_field = fields_iter\n .next()\n .ok_or_else(|| api_error!(\"Empty struct field values\"))?;\n\n match v {\n serde_json::Value::Array(v) => {\n let mut field_vals_iter = v.into_iter();\n let key = Self::from_json(\n field_vals_iter.next().ok_or_else(|| {\n api_error!(\"Empty struct field values\")\n })?,\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_values(\n fields_iter.zip(field_vals_iter),\n )?;\n Ok((key, values.into()))\n }\n 
serde_json::Value::Object(mut v) => {\n let key = Self::from_json(\n std::mem::take(v.get_mut(&key_field.name).ok_or_else(\n || {\n api_error!(\n \"key field `{}` doesn't exist in value\",\n key_field.name\n )\n },\n )?),\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_object(v, fields_iter)?;\n Ok((key, values.into()))\n }\n _ => api_bail!(\"Table value must be a JSON array or object\"),\n }\n })\n .collect::>>()?;\n Value::KTable(rows)\n }\n TableKind::LTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n },\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct TypedValue<'a> {\n pub t: &'a ValueType,\n pub v: &'a Value,\n}\n\nimpl Serialize for TypedValue<'_> {\n fn serialize(&self, serializer: S) -> Result {\n match (self.t, self.v) {\n (_, Value::Null) => serializer.serialize_none(),\n (ValueType::Basic(t), v) => match t {\n BasicValueType::Union(_) => match v {\n Value::Basic(BasicValue::UnionVariant { value, .. 
}) => {\n value.serialize(serializer)\n }\n _ => Err(serde::ser::Error::custom(\n \"Unmatched union type and value for `TypedValue`\",\n )),\n },\n _ => v.serialize(serializer),\n },\n (ValueType::Struct(s), Value::Struct(field_values)) => TypedFieldsValue {\n schema: &s.fields,\n values_iter: field_values.fields.iter(),\n }\n .serialize(serializer),\n (ValueType::Table(c), Value::UTable(rows) | Value::LTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for row in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: row.fields.iter(),\n })?;\n }\n seq.end()\n }\n (ValueType::Table(c), Value::KTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for (k, v) in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: std::iter::once(&Value::from(k.clone()))\n .chain(v.fields.iter()),\n })?;\n }\n seq.end()\n }\n _ => Err(serde::ser::Error::custom(format!(\n \"Incompatible value type: {:?} {:?}\",\n self.t, self.v\n ))),\n }\n }\n}\n\npub struct TypedFieldsValue<'a, I: Iterator + Clone> {\n pub schema: &'a [FieldSchema],\n pub values_iter: I,\n}\n\nimpl<'a, I: Iterator + Clone> Serialize for TypedFieldsValue<'a, I> {\n fn serialize(&self, serializer: S) -> Result {\n let mut map = serializer.serialize_map(Some(self.schema.len()))?;\n let values_iter = self.values_iter.clone();\n for (field, value) in self.schema.iter().zip(values_iter) {\n map.serialize_entry(\n &field.name,\n &TypedValue {\n t: &field.value_type.typ,\n v: value,\n },\n )?;\n }\n map.end()\n }\n}\n\npub mod test_util {\n use super::*;\n\n pub fn seder_roundtrip(value: &Value, typ: &ValueType) -> Result {\n let json_value = serde_json::to_value(value)?;\n let roundtrip_value = Value::from_json(json_value, typ)?;\n Ok(roundtrip_value)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use std::collections::BTreeMap;\n\n #[test]\n fn test_estimated_byte_size_null() {\n let 
value = Value::::Null;\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_primitive() {\n // Test primitives that should have 0 detached byte size\n let value = Value::::Basic(BasicValue::Bool(true));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Int64(42));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Float64(3.14));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_string() {\n let test_str = \"hello world\";\n let value = Value::::Basic(BasicValue::Str(Arc::from(test_str)));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_str.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_bytes() {\n let test_bytes = b\"hello world\";\n let value = Value::::Basic(BasicValue::Bytes(Bytes::from(test_bytes.to_vec())));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_bytes.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_json() {\n let json_val = serde_json::json!({\"key\": \"value\", \"number\": 42});\n let value = Value::::Basic(BasicValue::Json(Arc::from(json_val)));\n let size = value.estimated_byte_size();\n\n // Should include the size of the JSON structure\n // The exact size depends on the internal JSON representation\n assert!(size > std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_vector() {\n let vec_elements = vec![\n BasicValue::Str(Arc::from(\"hello\")),\n BasicValue::Str(Arc::from(\"world\")),\n BasicValue::Int64(42),\n ];\n let value = Value::::Basic(BasicValue::Vector(Arc::from(vec_elements)));\n let size = 
value.estimated_byte_size();\n\n // Should include the size of the vector elements\n let expected_min_size = std::mem::size_of::>()\n + \"hello\".len()\n + \"world\".len()\n + 3 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_struct() {\n let fields = vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"test\"))),\n Value::::Basic(BasicValue::Int64(123)),\n ];\n let field_values = FieldValues { fields };\n let value = Value::::Struct(field_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"test\".len()\n + 2 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_utable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item2\",\n )))],\n }),\n ];\n let value = Value::::UTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"item1\".len()\n + \"item2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ltable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list2\",\n )))],\n }),\n ];\n let value = Value::::LTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"list1\".len()\n + \"list2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ktable() {\n let mut map = BTreeMap::new();\n map.insert(\n KeyValue::Str(Arc::from(\"key1\")),\n ScopeValue(FieldValues {\n fields: 
vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value1\",\n )))],\n }),\n );\n map.insert(\n KeyValue::Str(Arc::from(\"key2\")),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value2\",\n )))],\n }),\n );\n let value = Value::::KTable(map);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"key1\".len()\n + \"key2\".len()\n + \"value1\".len()\n + \"value2\".len()\n + 2 * std::mem::size_of::<(String, ScopeValue)>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_nested_struct() {\n let inner_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"inner\"))),\n Value::::Basic(BasicValue::Int64(456)),\n ],\n });\n\n let outer_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"outer\"))),\n inner_struct,\n ],\n });\n\n let size = outer_struct.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"outer\".len()\n + \"inner\".len()\n + 4 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_empty_collections() {\n // Empty UTable\n let value = Value::::UTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty LTable\n let value = Value::::LTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty KTable\n let value = Value::::KTable(BTreeMap::new());\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty Struct\n let value = Value::::Struct(FieldValues { fields: vec![] });\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n}\n"], ["/cocoindex/src/ops/sources/google_drive.rs", "use chrono::Duration;\nuse google_drive3::{\n DriveHub,\n api::{File, Scope},\n 
yup_oauth2::{ServiceAccountAuthenticator, read_service_account_key},\n};\nuse http_body_util::BodyExt;\nuse hyper_rustls::HttpsConnector;\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse phf::phf_map;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\nstruct ExportMimeType {\n text: &'static str,\n binary: &'static str,\n}\n\nconst FOLDER_MIME_TYPE: &str = \"application/vnd.google-apps.folder\";\nconst FILE_MIME_TYPE: &str = \"application/vnd.google-apps.file\";\nstatic EXPORT_MIME_TYPES: phf::Map<&'static str, ExportMimeType> = phf_map! {\n \"application/vnd.google-apps.document\" =>\n ExportMimeType {\n text: \"text/markdown\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.spreadsheet\" =>\n ExportMimeType {\n text: \"text/csv\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.presentation\" =>\n ExportMimeType {\n text: \"text/plain\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.drawing\" =>\n ExportMimeType {\n text: \"image/svg+xml\",\n binary: \"image/png\",\n },\n \"application/vnd.google-apps.script\" =>\n ExportMimeType {\n text: \"application/vnd.google-apps.script+json\",\n binary: \"application/vnd.google-apps.script+json\",\n },\n};\n\nfn is_supported_file_type(mime_type: &str) -> bool {\n !mime_type.starts_with(\"application/vnd.google-apps.\")\n || EXPORT_MIME_TYPES.contains_key(mime_type)\n || mime_type == FILE_MIME_TYPE\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n service_account_credential_path: String,\n binary: bool,\n root_folder_ids: Vec,\n recent_changes_poll_interval: Option,\n}\n\nstruct Executor {\n drive_hub: DriveHub>,\n binary: bool,\n root_folder_ids: IndexSet>,\n recent_updates_poll_interval: Option,\n}\n\nimpl Executor {\n async fn new(spec: Spec) -> Result {\n let service_account_key =\n read_service_account_key(spec.service_account_credential_path).await?;\n let auth = ServiceAccountAuthenticator::builder(service_account_key)\n 
.build()\n .await?;\n let client =\n hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())\n .build(\n hyper_rustls::HttpsConnectorBuilder::new()\n .with_provider_and_native_roots(\n rustls::crypto::aws_lc_rs::default_provider(),\n )?\n .https_only()\n .enable_http2()\n .build(),\n );\n let drive_hub = DriveHub::new(client, auth);\n Ok(Self {\n drive_hub,\n binary: spec.binary,\n root_folder_ids: spec.root_folder_ids.into_iter().map(Arc::from).collect(),\n recent_updates_poll_interval: spec.recent_changes_poll_interval,\n })\n }\n}\n\nfn escape_string(s: &str) -> String {\n let mut escaped = String::with_capacity(s.len());\n for c in s.chars() {\n match c {\n '\\'' | '\\\\' => escaped.push('\\\\'),\n _ => {}\n }\n escaped.push(c);\n }\n escaped\n}\n\nconst CUTOFF_TIME_BUFFER: Duration = Duration::seconds(1);\nimpl Executor {\n fn visit_file(\n &self,\n file: File,\n new_folder_ids: &mut Vec>,\n seen_ids: &mut HashSet>,\n ) -> Result> {\n if file.trashed == Some(true) {\n return Ok(None);\n }\n let (id, mime_type) = match (file.id, file.mime_type) {\n (Some(id), Some(mime_type)) => (Arc::::from(id), mime_type),\n (id, mime_type) => {\n warn!(\"Skipping file with incomplete metadata: id={id:?}, mime_type={mime_type:?}\",);\n return Ok(None);\n }\n };\n if !seen_ids.insert(id.clone()) {\n return Ok(None);\n }\n let result = if mime_type == FOLDER_MIME_TYPE {\n new_folder_ids.push(id);\n None\n } else if is_supported_file_type(&mime_type) {\n Some(PartialSourceRowMetadata {\n key: KeyValue::Str(id),\n ordinal: file.modified_time.map(|t| t.try_into()).transpose()?,\n })\n } else {\n None\n };\n Ok(result)\n }\n\n async fn list_files(\n &self,\n folder_id: &str,\n fields: &str,\n next_page_token: &mut Option,\n ) -> Result> {\n let query = format!(\"'{}' in parents\", escape_string(folder_id));\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .q(&query)\n .param(\"fields\", fields);\n if let 
Some(next_page_token) = &next_page_token {\n list_call = list_call.page_token(next_page_token);\n }\n let (_, files) = list_call.doit().await?;\n *next_page_token = files.next_page_token;\n let file_iter = files.files.into_iter().flat_map(|file| file.into_iter());\n Ok(file_iter)\n }\n\n fn make_cutoff_time(\n most_recent_modified_time: Option>,\n list_start_time: DateTime,\n ) -> DateTime {\n let safe_upperbound = list_start_time - CUTOFF_TIME_BUFFER;\n most_recent_modified_time\n .map(|t| t.min(safe_upperbound))\n .unwrap_or(safe_upperbound)\n }\n\n async fn get_recent_updates(\n &self,\n cutoff_time: &mut DateTime,\n ) -> Result {\n let mut page_size: i32 = 10;\n let mut next_page_token: Option = None;\n let mut changes = Vec::new();\n let mut most_recent_modified_time = None;\n let start_time = Utc::now();\n 'paginate: loop {\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"files(id,modifiedTime,parents,trashed)\")\n .order_by(\"modifiedTime desc\")\n .page_size(page_size);\n if let Some(token) = next_page_token {\n list_call = list_call.page_token(token.as_str());\n }\n let (_, files) = list_call.doit().await?;\n for file in files.files.into_iter().flat_map(|files| files.into_iter()) {\n let modified_time = file.modified_time.unwrap_or_default();\n if most_recent_modified_time.is_none() {\n most_recent_modified_time = Some(modified_time);\n }\n if modified_time <= *cutoff_time {\n break 'paginate;\n }\n let file_id = file.id.ok_or_else(|| anyhow!(\"File has no id\"))?;\n if self.is_file_covered(&file_id).await? 
{\n changes.push(SourceChange {\n key: KeyValue::Str(Arc::from(file_id)),\n data: None,\n });\n }\n }\n if let Some(token) = files.next_page_token {\n next_page_token = Some(token);\n } else {\n break;\n }\n // List more in a page since 2nd.\n page_size = 100;\n }\n *cutoff_time = Self::make_cutoff_time(most_recent_modified_time, start_time);\n Ok(SourceChangeMessage {\n changes,\n ack_fn: None,\n })\n }\n\n async fn is_file_covered(&self, file_id: &str) -> Result {\n let mut next_file_id = Some(Cow::Borrowed(file_id));\n while let Some(file_id) = next_file_id {\n if self.root_folder_ids.contains(file_id.as_ref()) {\n return Ok(true);\n }\n let (_, file) = self\n .drive_hub\n .files()\n .get(&file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"parents\")\n .doit()\n .await?;\n next_file_id = file\n .parents\n .into_iter()\n .flat_map(|parents| parents.into_iter())\n .map(Cow::Owned)\n .next();\n }\n Ok(false)\n }\n}\n\ntrait ResultExt {\n type OptResult;\n fn or_not_found(self) -> Self::OptResult;\n}\n\nimpl ResultExt for google_drive3::Result {\n type OptResult = google_drive3::Result>;\n\n fn or_not_found(self) -> Self::OptResult {\n match self {\n Ok(value) => Ok(Some(value)),\n Err(google_drive3::Error::BadRequest(err_msg))\n if err_msg\n .get(\"error\")\n .and_then(|e| e.get(\"code\"))\n .and_then(|code| code.as_i64())\n == Some(404) =>\n {\n Ok(None)\n }\n Err(e) => Err(e),\n }\n }\n}\n\nfn optional_modified_time(include_ordinal: bool) -> &'static str {\n if include_ordinal { \",modifiedTime\" } else { \"\" }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let mut seen_ids = HashSet::new();\n let mut folder_ids = self.root_folder_ids.clone();\n let fields = format!(\n \"files(id,name,mimeType,trashed{})\",\n optional_modified_time(options.include_ordinal)\n );\n let mut new_folder_ids = Vec::new();\n try_stream! 
{\n while let Some(folder_id) = folder_ids.pop() {\n let mut next_page_token = None;\n loop {\n let mut curr_rows = Vec::new();\n let files = self\n .list_files(&folder_id, &fields, &mut next_page_token)\n .await?;\n for file in files {\n curr_rows.extend(self.visit_file(file, &mut new_folder_ids, &mut seen_ids)?);\n }\n if !curr_rows.is_empty() {\n yield curr_rows;\n }\n if next_page_token.is_none() {\n break;\n }\n }\n folder_ids.extend(new_folder_ids.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let file_id = key.str_value()?;\n let fields = format!(\n \"id,name,mimeType,trashed{}\",\n optional_modified_time(options.include_ordinal)\n );\n let resp = self\n .drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", &fields)\n .doit()\n .await\n .or_not_found()?;\n let file = match resp {\n Some((_, file)) if file.trashed != Some(true) => file,\n _ => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n let ordinal = if options.include_ordinal {\n file.modified_time.map(|t| t.try_into()).transpose()?\n } else {\n None\n };\n let type_n_body = if let Some(export_mime_type) = file\n .mime_type\n .as_ref()\n .and_then(|mime_type| EXPORT_MIME_TYPES.get(mime_type.as_str()))\n {\n let target_mime_type = if self.binary {\n export_mime_type.binary\n } else {\n export_mime_type.text\n };\n self.drive_hub\n .files()\n .export(file_id, target_mime_type)\n .add_scope(Scope::Readonly)\n .doit()\n .await\n .or_not_found()?\n .map(|content| (Some(target_mime_type.to_string()), content.into_body()))\n } else {\n self.drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"alt\", \"media\")\n .doit()\n .await\n .or_not_found()?\n .map(|(resp, _)| (file.mime_type, resp.into_body()))\n };\n let value = match type_n_body {\n Some((mime_type, 
resp_body)) => {\n let content = resp_body.collect().await?;\n\n let fields = vec![\n file.name.unwrap_or_default().into(),\n mime_type.into(),\n if self.binary {\n content.to_bytes().to_vec().into()\n } else {\n String::from_utf8_lossy(&content.to_bytes())\n .to_string()\n .into()\n },\n ];\n Some(SourceValue::Existence(FieldValues { fields }))\n }\n None => None,\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let poll_interval = if let Some(poll_interval) = self.recent_updates_poll_interval {\n poll_interval\n } else {\n return Ok(None);\n };\n let mut cutoff_time = Utc::now() - CUTOFF_TIME_BUFFER;\n let mut interval = tokio::time::interval(poll_interval);\n interval.tick().await;\n let stream = stream! {\n loop {\n interval.tick().await;\n yield self.get_recent_updates(&mut cutoff_time).await;\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"GoogleDrive\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"file_id\",\n make_output_type(BasicValueType::Str),\n ));\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n let mime_type_field = schema_builder.add_field(FieldSchema::new(\n \"mime_type\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n )\n .with_attr(\n field_attrs::CONTENT_MIME_TYPE,\n 
serde_json::to_value(mime_type_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec).await?))\n }\n}\n"], ["/cocoindex/src/ops/interface.rs", "use std::time::SystemTime;\n\nuse crate::base::{schema::*, spec::IndexOptions, value::*};\nuse crate::prelude::*;\nuse crate::setup;\nuse chrono::TimeZone;\nuse serde::Serialize;\n\npub struct FlowInstanceContext {\n pub flow_instance_name: String,\n pub auth_registry: Arc,\n pub py_exec_ctx: Option>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Default)]\npub struct Ordinal(pub Option);\n\nimpl Ordinal {\n pub fn unavailable() -> Self {\n Self(None)\n }\n\n pub fn is_available(&self) -> bool {\n self.0.is_some()\n }\n}\n\nimpl From for Option {\n fn from(val: Ordinal) -> Self {\n val.0\n }\n}\n\nimpl TryFrom for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: SystemTime) -> Result {\n let duration = time.duration_since(std::time::UNIX_EPOCH)?;\n Ok(Ordinal(Some(duration.as_micros().try_into()?)))\n }\n}\n\nimpl TryFrom> for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: chrono::DateTime) -> Result {\n Ok(Ordinal(Some(time.timestamp_micros())))\n }\n}\n\npub struct PartialSourceRowMetadata {\n pub key: KeyValue,\n pub ordinal: Option,\n}\n\n#[derive(Debug)]\npub enum SourceValue {\n Existence(FieldValues),\n NonExistence,\n}\n\nimpl SourceValue {\n pub fn is_existent(&self) -> bool {\n matches!(self, Self::Existence(_))\n }\n\n pub fn as_optional(&self) -> Option<&FieldValues> {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n\n pub fn into_optional(self) -> Option {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n}\n\npub struct SourceData {\n pub value: SourceValue,\n pub ordinal: 
Ordinal,\n}\n\npub struct SourceChange {\n pub key: KeyValue,\n\n /// If None, the engine will poll to get the latest existence state and value.\n pub data: Option,\n}\n\npub struct SourceChangeMessage {\n pub changes: Vec,\n pub ack_fn: Option BoxFuture<'static, Result<()>> + Send + Sync>>,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorListOptions {\n pub include_ordinal: bool,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorGetOptions {\n pub include_ordinal: bool,\n pub include_value: bool,\n}\n\n#[derive(Debug)]\npub struct PartialSourceRowData {\n pub value: Option,\n pub ordinal: Option,\n}\n\nimpl TryFrom for SourceData {\n type Error = anyhow::Error;\n\n fn try_from(data: PartialSourceRowData) -> Result {\n Ok(Self {\n value: data\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value is missing\"))?,\n ordinal: data\n .ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is missing\"))?,\n })\n }\n}\n#[async_trait]\npub trait SourceExecutor: Send + Sync {\n /// Get the list of keys for the source.\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>>;\n\n // Get the value for the given key.\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result;\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n Ok(None)\n }\n}\n\n#[async_trait]\npub trait SourceFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[async_trait]\npub trait SimpleFunctionExecutor: Send + Sync {\n /// Evaluate the operation.\n async fn evaluate(&self, args: Vec) -> Result;\n\n fn enable_cache(&self) -> bool {\n false\n }\n\n /// Must be Some if `enable_cache` is true.\n /// If it changes, the cache will be invalidated.\n fn behavior_version(&self) -> Option {\n None\n }\n}\n\n#[async_trait]\npub trait SimpleFunctionFactory {\n async fn build(\n self: Arc,\n spec: 
serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[derive(Debug)]\npub struct ExportTargetUpsertEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n pub value: FieldValues,\n}\n\n#[derive(Debug)]\npub struct ExportTargetDeleteEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Debug, Default)]\npub struct ExportTargetMutation {\n pub upserts: Vec,\n pub deletes: Vec,\n}\n\nimpl ExportTargetMutation {\n pub fn is_empty(&self) -> bool {\n self.upserts.is_empty() && self.deletes.is_empty()\n }\n}\n\n#[derive(Debug)]\npub struct ExportTargetMutationWithContext<'ctx, T: ?Sized + Send + Sync> {\n pub mutation: ExportTargetMutation,\n pub export_context: &'ctx T,\n}\n\npub struct ResourceSetupChangeItem<'a> {\n pub key: &'a serde_json::Value,\n pub setup_status: &'a dyn setup::ResourceSetupStatus,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum SetupStateCompatibility {\n /// The resource is fully compatible with the desired state.\n /// This means the resource can be updated to the desired state without any loss of data.\n Compatible,\n /// The resource is partially compatible with the desired state.\n /// This means data from some existing fields will be lost after applying the setup change.\n /// But at least their key fields of all rows are still preserved.\n PartialCompatible,\n /// The resource needs to be rebuilt. 
After applying the setup change, all data will be gone.\n NotCompatible,\n}\n\npub struct ExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n}\n\npub struct ExportDataCollectionSpec {\n pub name: String,\n pub spec: serde_json::Value,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\n#[async_trait]\npub trait ExportTargetFactory: Send + Sync {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )>;\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n context: Arc,\n ) -> Result>;\n\n /// Normalize the key. e.g. the JSON format may change (after code change, e.g. 
new optional field or field ordering), even if the underlying value is not changed.\n /// This should always return the canonical serialized form.\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result;\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result;\n\n fn extract_additional_key(\n &self,\n key: &KeyValue,\n value: &FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result;\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[derive(Clone)]\npub enum ExecutorFactory {\n Source(Arc),\n SimpleFunction(Arc),\n ExportTarget(Arc),\n}\n"], ["/cocoindex/src/ops/py_factory.rs", "use crate::prelude::*;\n\nuse pyo3::{\n IntoPyObjectExt, Py, PyAny, Python, pyclass, pymethods,\n types::{IntoPyDict, PyList, PyString, PyTuple},\n};\nuse pythonize::{depythonize, pythonize};\n\nuse crate::{\n base::{schema, value},\n builder::plan,\n ops::sdk::SetupStateCompatibility,\n py::{self, ToResultWithPyTrace},\n};\nuse anyhow::{Result, anyhow};\n\n#[pyclass(name = \"OpArgSchema\")]\npub struct PyOpArgSchema {\n value_type: crate::py::Pythonized,\n analyzed_value: crate::py::Pythonized,\n}\n\n#[pymethods]\nimpl PyOpArgSchema {\n #[getter]\n fn value_type(&self) -> &crate::py::Pythonized {\n &self.value_type\n }\n\n #[getter]\n fn analyzed_value(&self) -> &crate::py::Pythonized {\n &self.analyzed_value\n }\n}\n\nstruct PyFunctionExecutor {\n py_function_executor: Py,\n py_exec_ctx: Arc,\n\n num_positional_args: usize,\n kw_args_names: Vec>,\n result_type: schema::EnrichedValueType,\n\n enable_cache: bool,\n behavior_version: Option,\n}\n\nimpl PyFunctionExecutor {\n fn call_py_fn<'py>(\n &self,\n py: Python<'py>,\n input: Vec,\n ) -> Result> {\n let mut args = 
Vec::with_capacity(self.num_positional_args);\n for v in input[0..self.num_positional_args].iter() {\n args.push(py::value_to_py_object(py, v)?);\n }\n\n let kwargs = if self.kw_args_names.is_empty() {\n None\n } else {\n let mut kwargs = Vec::with_capacity(self.kw_args_names.len());\n for (name, v) in self\n .kw_args_names\n .iter()\n .zip(input[self.num_positional_args..].iter())\n {\n kwargs.push((name.bind(py), py::value_to_py_object(py, v)?));\n }\n Some(kwargs)\n };\n\n let result = self\n .py_function_executor\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n kwargs\n .map(|kwargs| -> Result<_> { Ok(kwargs.into_py_dict(py)?) })\n .transpose()?\n .as_ref(),\n )\n .to_result_with_py_trace(py)?;\n Ok(result.into_bound(py))\n }\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionExecutor for Arc {\n async fn evaluate(&self, input: Vec) -> Result {\n let self = self.clone();\n let result_fut = Python::with_gil(|py| -> Result<_> {\n let result_coro = self.call_py_fn(py, input)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(self.py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro,\n )?)\n })?;\n let result = result_fut.await;\n Python::with_gil(|py| -> Result<_> {\n let result = result.to_result_with_py_trace(py)?;\n Ok(py::value_from_py_object(\n &self.result_type.typ,\n &result.into_bound(py),\n )?)\n })\n }\n\n fn enable_cache(&self) -> bool {\n self.enable_cache\n }\n\n fn behavior_version(&self) -> Option {\n self.behavior_version\n }\n}\n\npub(crate) struct PyFunctionFactory {\n pub py_function_factory: Py,\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionFactory for PyFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n schema::EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let (result_type, executor, kw_args_names, num_positional_args) =\n Python::with_gil(|py| -> 
anyhow::Result<_> {\n let mut args = vec![pythonize(py, &spec)?];\n let mut kwargs = vec![];\n let mut num_positional_args = 0;\n for arg in input_schema.into_iter() {\n let py_arg_schema = PyOpArgSchema {\n value_type: crate::py::Pythonized(arg.value_type.clone()),\n analyzed_value: crate::py::Pythonized(arg.analyzed_value.clone()),\n };\n match arg.name.0 {\n Some(name) => {\n kwargs.push((name.clone(), py_arg_schema));\n }\n None => {\n args.push(py_arg_schema.into_bound_py_any(py)?);\n num_positional_args += 1;\n }\n }\n }\n\n let kw_args_names = kwargs\n .iter()\n .map(|(name, _)| PyString::new(py, name).unbind())\n .collect::>();\n let result = self\n .py_function_factory\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n Some(&kwargs.into_py_dict(py)?),\n )\n .to_result_with_py_trace(py)?;\n let (result_type, executor) = result\n .extract::<(crate::py::Pythonized, Py)>(py)?;\n Ok((\n result_type.into_inner(),\n executor,\n kw_args_names,\n num_positional_args,\n ))\n })?;\n\n let executor_fut = {\n let result_type = result_type.clone();\n async move {\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let (prepare_fut, enable_cache, behavior_version) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let prepare_coro = executor\n .call_method(py, \"prepare\", (), None)\n .to_result_with_py_trace(py)?;\n let prepare_fut = pyo3_async_runtimes::into_future_with_locals(\n &pyo3_async_runtimes::TaskLocals::new(\n py_exec_ctx.event_loop.bind(py).clone(),\n ),\n prepare_coro.into_bound(py),\n )?;\n let enable_cache = executor\n .call_method(py, \"enable_cache\", (), None)\n .to_result_with_py_trace(py)?\n .extract::(py)?;\n let behavior_version = executor\n .call_method(py, \"behavior_version\", (), None)\n .to_result_with_py_trace(py)?\n .extract::>(py)?;\n Ok((prepare_fut, enable_cache, behavior_version))\n })?;\n prepare_fut.await?;\n 
Ok(Box::new(Arc::new(PyFunctionExecutor {\n py_function_executor: executor,\n py_exec_ctx,\n num_positional_args,\n kw_args_names,\n result_type,\n enable_cache,\n behavior_version,\n }))\n as Box)\n }\n };\n\n Ok((result_type, executor_fut.boxed()))\n }\n}\n\npub(crate) struct PyExportTargetFactory {\n pub py_target_connector: Py,\n}\n\nstruct PyTargetExecutorContext {\n py_export_ctx: Py,\n py_exec_ctx: Arc,\n}\n\n#[derive(Debug)]\nstruct PyTargetResourceSetupStatus {\n stale_existing_states: IndexSet>,\n desired_state: Option,\n}\n\nimpl setup::ResourceSetupStatus for PyTargetResourceSetupStatus {\n fn describe_changes(&self) -> Vec {\n vec![]\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n if self.stale_existing_states.is_empty() {\n setup::SetupChangeType::NoChange\n } else if self.desired_state.is_some() {\n if self\n .stale_existing_states\n .iter()\n .any(|state| state.is_none())\n {\n setup::SetupChangeType::Create\n } else {\n setup::SetupChangeType::Update\n }\n } else {\n setup::SetupChangeType::Delete\n }\n }\n}\n\n#[async_trait]\nimpl interface::ExportTargetFactory for PyExportTargetFactory {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n if declarations.len() != 0 {\n api_error!(\"Custom target connector doesn't support declarations yet\");\n }\n\n let mut build_outputs = Vec::with_capacity(data_collections.len());\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n for data_collection in data_collections.into_iter() {\n let (py_export_ctx, persistent_key) =\n Python::with_gil(|py| -> Result<(Py, serde_json::Value)> {\n // Deserialize the spec to Python object.\n let py_export_ctx = self\n .py_target_connector\n .call_method(\n py,\n \"create_export_context\",\n (\n &data_collection.name,\n pythonize(py, &data_collection.spec)?,\n 
pythonize(py, &data_collection.key_fields_schema)?,\n pythonize(py, &data_collection.value_fields_schema)?,\n ),\n None,\n )\n .to_result_with_py_trace(py)?;\n\n // Call the `get_persistent_key` method to get the persistent key.\n let persistent_key = self\n .py_target_connector\n .call_method(py, \"get_persistent_key\", (&py_export_ctx,), None)\n .to_result_with_py_trace(py)?;\n let persistent_key = depythonize(&persistent_key.into_bound(py))?;\n Ok((py_export_ctx, persistent_key))\n })?;\n\n let py_exec_ctx = py_exec_ctx.clone();\n let build_output = interface::ExportDataCollectionBuildOutput {\n export_context: Box::pin(async move {\n Ok(Arc::new(PyTargetExecutorContext {\n py_export_ctx,\n py_exec_ctx,\n }) as Arc)\n }),\n setup_key: persistent_key,\n desired_setup_state: data_collection.spec,\n };\n build_outputs.push(build_output);\n }\n Ok((build_outputs, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n _context: Arc,\n ) -> Result> {\n // Collect all possible existing states that are not the desired state.\n let mut stale_existing_states = IndexSet::new();\n if !existing_states.always_exists() && desired_state.is_some() {\n stale_existing_states.insert(None);\n }\n for possible_state in existing_states.possible_versions() {\n if Some(possible_state) != desired_state.as_ref() {\n stale_existing_states.insert(Some(possible_state.clone()));\n }\n }\n\n Ok(Box::new(PyTargetResourceSetupStatus {\n stale_existing_states,\n desired_state,\n }))\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n Ok(key.clone())\n }\n\n fn check_state_compatibility(\n &self,\n _desired_state: &serde_json::Value,\n _existing_state: &serde_json::Value,\n ) -> Result {\n // The Python target connector doesn't support state update yet.\n Ok(SetupStateCompatibility::Compatible)\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n 
Python::with_gil(|py| -> Result {\n let result = self\n .py_target_connector\n .call_method(py, \"describe_resource\", (pythonize(py, key)?,), None)\n .to_result_with_py_trace(py)?;\n let description = result.extract::(py)?;\n Ok(description)\n })\n }\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Filter the setup changes that are not NoChange, and flatten to\n // `list[tuple[key, list[stale_existing_states | None], desired_state | None]]` for Python.\n let mut setup_changes = Vec::new();\n for item in setup_status.into_iter() {\n let decoded_setup_status = (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n if ::change_type(decoded_setup_status)\n != setup::SetupChangeType::NoChange\n {\n setup_changes.push((\n item.key,\n &decoded_setup_status.stale_existing_states,\n &decoded_setup_status.desired_state,\n ));\n }\n }\n\n if setup_changes.is_empty() {\n return Ok(());\n }\n\n // Call the `apply_setup_changes_async()` method.\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let py_result = Python::with_gil(move |py| -> Result<_> {\n let result_coro = self\n .py_target_connector\n .call_method(\n py,\n \"apply_setup_changes_async\",\n (pythonize(py, &setup_changes)?,),\n None,\n )\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n\n Ok(())\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec<\n 
interface::ExportTargetMutationWithContext<'async_trait, dyn Any + Send + Sync>,\n >,\n ) -> Result<()> {\n if mutations.is_empty() {\n return Ok(());\n }\n\n let py_result = Python::with_gil(|py| -> Result<_> {\n // Create a `list[tuple[export_ctx, list[tuple[key, value | None]]]]` for Python, and collect `py_exec_ctx`.\n let mut py_args = Vec::with_capacity(mutations.len());\n let mut py_exec_ctx: Option<&Arc> = None;\n for mutation in mutations.into_iter() {\n // Downcast export_context to PyTargetExecutorContext.\n let export_context = (mutation.export_context as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n\n let mut flattened_mutations = Vec::with_capacity(\n mutation.mutation.upserts.len() + mutation.mutation.deletes.len(),\n );\n for upsert in mutation.mutation.upserts.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &upsert.key.into())?,\n py::field_values_to_py_object(py, upsert.value.fields.iter())?,\n ));\n }\n for delete in mutation.mutation.deletes.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &delete.key.into())?,\n py.None().into_bound(py),\n ));\n }\n py_args.push((\n &export_context.py_export_ctx,\n PyList::new(py, flattened_mutations)?.into_any(),\n ));\n py_exec_ctx = py_exec_ctx.or(Some(&export_context.py_exec_ctx));\n }\n let py_exec_ctx = py_exec_ctx.ok_or_else(invariance_violation)?;\n\n let result_coro = self\n .py_target_connector\n .call_method(py, \"mutate_async\", (py_args,), None)\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/sources/azure_blob.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse azure_core::prelude::NextMarker;\nuse 
azure_identity::{DefaultAzureCredential, TokenCredentialOptions};\nuse azure_storage::StorageCredentials;\nuse azure_storage_blobs::prelude::*;\nuse futures::StreamExt;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n account_name: String,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n\n /// SAS token for authentication. Takes precedence over account_access_key.\n sas_token: Option>,\n /// Account access key for authentication. If not provided, will use default Azure credential.\n account_access_key: Option>,\n}\n\nstruct Executor {\n client: BlobServiceClient,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &time::OffsetDateTime) -> Ordinal {\n Ordinal(Some(dt.unix_timestamp_nanos() as i64 / 1000))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! 
{\n let mut continuation_token: Option = None;\n loop {\n let mut list_builder = self.client\n .container_client(&self.container_name)\n .list_blobs();\n\n if let Some(p) = &self.prefix {\n list_builder = list_builder.prefix(p.clone());\n }\n\n if let Some(token) = continuation_token.take() {\n list_builder = list_builder.marker(token);\n }\n\n let mut page_stream = list_builder.into_stream();\n let Some(page_result) = page_stream.next().await else {\n break;\n };\n\n let page = page_result?;\n let mut batch = Vec::new();\n\n for blob in page.blobs.blobs() {\n let key = &blob.name;\n\n // Only include files (not directories)\n if key.ends_with('/') { continue; }\n\n if self.is_file_included(key) {\n let ordinal = Some(datetime_to_ordinal(&blob.properties.last_modified));\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.clone().into()),\n ordinal,\n });\n }\n }\n\n if !batch.is_empty() {\n yield batch;\n }\n\n continuation_token = page.next_marker;\n if continuation_token.is_none() {\n break;\n }\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n\n let blob_client = self\n .client\n .container_client(&self.container_name)\n .blob_client(key_str.as_ref());\n\n let mut stream = blob_client.get().into_stream();\n let result = stream.next().await;\n\n let blob_response = match result {\n Some(response) => response?,\n None => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n\n let ordinal = if options.include_ordinal {\n Some(datetime_to_ordinal(\n &blob_response.blob.properties.last_modified,\n ))\n } else {\n None\n };\n\n let value = if options.include_value {\n let bytes = 
blob_response.data.collect().await?;\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes)\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n // Azure Blob Storage doesn't have built-in change notifications like S3+SQS\n Ok(None)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AzureBlob\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n context: Arc,\n ) -> Result> {\n let credential = if let Some(sas_token) = spec.sas_token {\n let sas_token = context.auth_registry.get(&sas_token)?;\n StorageCredentials::sas_token(sas_token)?\n } else if let Some(account_access_key) = spec.account_access_key {\n let account_access_key = context.auth_registry.get(&account_access_key)?;\n StorageCredentials::access_key(spec.account_name.clone(), account_access_key)\n } else {\n let default_credential = Arc::new(DefaultAzureCredential::create(\n TokenCredentialOptions::default(),\n )?);\n StorageCredentials::token_credential(default_credential)\n };\n\n let client = 
BlobServiceClient::new(&spec.account_name, credential);\n Ok(Box::new(Executor {\n client,\n container_name: spec.container_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/builder/analyzer.rs", "use crate::builder::exec_ctx::AnalyzedSetupState;\nuse crate::ops::get_executor_factory;\nuse crate::prelude::*;\n\nuse super::plan::*;\nuse crate::lib_context::get_auth_registry;\nuse crate::utils::fingerprint::Fingerprinter;\nuse crate::{\n base::{schema::*, spec::*},\n ops::interface::*,\n};\nuse futures::future::{BoxFuture, try_join3};\nuse futures::{FutureExt, future::try_join_all};\n\n#[derive(Debug)]\npub(super) enum ValueTypeBuilder {\n Basic(BasicValueType),\n Struct(StructSchemaBuilder),\n Table(TableSchemaBuilder),\n}\n\nimpl TryFrom<&ValueType> for ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_from(value_type: &ValueType) -> Result {\n match value_type {\n ValueType::Basic(basic_type) => Ok(ValueTypeBuilder::Basic(basic_type.clone())),\n ValueType::Struct(struct_type) => Ok(ValueTypeBuilder::Struct(struct_type.try_into()?)),\n ValueType::Table(table_type) => Ok(ValueTypeBuilder::Table(table_type.try_into()?)),\n }\n }\n}\n\nimpl TryInto for &ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n match self {\n ValueTypeBuilder::Basic(basic_type) => Ok(ValueType::Basic(basic_type.clone())),\n ValueTypeBuilder::Struct(struct_type) => Ok(ValueType::Struct(struct_type.try_into()?)),\n ValueTypeBuilder::Table(table_type) => Ok(ValueType::Table(table_type.try_into()?)),\n }\n }\n}\n\n#[derive(Default, Debug)]\npub(super) struct 
StructSchemaBuilder {\n fields: Vec>,\n field_name_idx: HashMap,\n description: Option>,\n}\n\nimpl StructSchemaBuilder {\n fn add_field(&mut self, field: FieldSchema) -> Result {\n let field_idx = self.fields.len() as u32;\n match self.field_name_idx.entry(field.name.clone()) {\n std::collections::hash_map::Entry::Occupied(_) => {\n bail!(\"Field name already exists: {}\", field.name);\n }\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(field_idx);\n }\n }\n self.fields.push(field);\n Ok(field_idx)\n }\n\n pub fn find_field(&self, field_name: &'_ str) -> Option<(u32, &FieldSchema)> {\n self.field_name_idx\n .get(field_name)\n .map(|&field_idx| (field_idx, &self.fields[field_idx as usize]))\n }\n}\n\nimpl TryFrom<&StructSchema> for StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &StructSchema) -> Result {\n let mut result = StructSchemaBuilder {\n fields: Vec::with_capacity(schema.fields.len()),\n field_name_idx: HashMap::with_capacity(schema.fields.len()),\n description: schema.description.clone(),\n };\n for field in schema.fields.iter() {\n result.add_field(FieldSchema::::from_alternative(field)?)?;\n }\n Ok(result)\n }\n}\n\nimpl TryInto for &StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n Ok(StructSchema {\n fields: Arc::new(\n self.fields\n .iter()\n .map(FieldSchema::::from_alternative)\n .collect::>>()?,\n ),\n description: self.description.clone(),\n })\n }\n}\n\n#[derive(Debug)]\npub(super) struct TableSchemaBuilder {\n pub kind: TableKind,\n pub sub_scope: Arc>,\n}\n\nimpl TryFrom<&TableSchema> for TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &TableSchema) -> Result {\n Ok(Self {\n kind: schema.kind,\n sub_scope: Arc::new(Mutex::new(DataScopeBuilder {\n data: (&schema.row).try_into()?,\n })),\n })\n }\n}\n\nimpl TryInto for &TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n let sub_scope = 
self.sub_scope.lock().unwrap();\n let row = (&sub_scope.data).try_into()?;\n Ok(TableSchema {\n kind: self.kind,\n row,\n })\n }\n}\n\nfn try_make_common_value_type(\n value_type1: &EnrichedValueType,\n value_type2: &EnrichedValueType,\n) -> Result {\n let typ = match (&value_type1.typ, &value_type2.typ) {\n (ValueType::Basic(basic_type1), ValueType::Basic(basic_type2)) => {\n if basic_type1 != basic_type2 {\n api_bail!(\"Value types are not compatible: {basic_type1} vs {basic_type2}\");\n }\n ValueType::Basic(basic_type1.clone())\n }\n (ValueType::Struct(struct_type1), ValueType::Struct(struct_type2)) => {\n let common_schema = try_merge_struct_schemas(struct_type1, struct_type2)?;\n ValueType::Struct(common_schema)\n }\n (ValueType::Table(table_type1), ValueType::Table(table_type2)) => {\n if table_type1.kind != table_type2.kind {\n api_bail!(\n \"Collection types are not compatible: {} vs {}\",\n table_type1,\n table_type2\n );\n }\n let row = try_merge_struct_schemas(&table_type1.row, &table_type2.row)?;\n ValueType::Table(TableSchema {\n kind: table_type1.kind,\n row,\n })\n }\n (t1 @ (ValueType::Basic(_) | ValueType::Struct(_) | ValueType::Table(_)), t2) => {\n api_bail!(\"Unmatched types:\\n {t1}\\n {t2}\\n\",)\n }\n };\n let common_attrs: Vec<_> = value_type1\n .attrs\n .iter()\n .filter_map(|(k, v)| {\n if value_type2.attrs.get(k) == Some(v) {\n Some((k, v))\n } else {\n None\n }\n })\n .collect();\n let attrs = if common_attrs.len() == value_type1.attrs.len() {\n value_type1.attrs.clone()\n } else {\n Arc::new(\n common_attrs\n .into_iter()\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect(),\n )\n };\n\n Ok(EnrichedValueType {\n typ,\n nullable: value_type1.nullable || value_type2.nullable,\n attrs,\n })\n}\n\nfn try_merge_fields_schemas(\n schema1: &[FieldSchema],\n schema2: &[FieldSchema],\n) -> Result> {\n if schema1.len() != schema2.len() {\n api_bail!(\n \"Fields are not compatible as they have different fields count:\\n ({})\\n ({})\\n\",\n 
schema1\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \"),\n schema2\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \")\n );\n }\n let mut result_fields = Vec::with_capacity(schema1.len());\n for (field1, field2) in schema1.iter().zip(schema2.iter()) {\n if field1.name != field2.name {\n api_bail!(\n \"Structs are not compatible as they have incompatible field names `{}` vs `{}`\",\n field1.name,\n field2.name\n );\n }\n result_fields.push(FieldSchema {\n name: field1.name.clone(),\n value_type: try_make_common_value_type(&field1.value_type, &field2.value_type)?,\n });\n }\n Ok(result_fields)\n}\n\nfn try_merge_struct_schemas(\n schema1: &StructSchema,\n schema2: &StructSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(StructSchema {\n fields: Arc::new(fields),\n description: schema1\n .description\n .clone()\n .or_else(|| schema2.description.clone()),\n })\n}\n\nfn try_merge_collector_schemas(\n schema1: &CollectorSchema,\n schema2: &CollectorSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(CollectorSchema {\n fields,\n auto_uuid_field_idx: if schema1.auto_uuid_field_idx == schema2.auto_uuid_field_idx {\n schema1.auto_uuid_field_idx\n } else {\n None\n },\n })\n}\n\n#[derive(Debug)]\npub(super) struct CollectorBuilder {\n pub schema: Arc,\n pub is_used: bool,\n}\n\nimpl CollectorBuilder {\n pub fn new(schema: Arc) -> Self {\n Self {\n schema,\n is_used: false,\n }\n }\n\n pub fn merge_schema(&mut self, schema: &CollectorSchema) -> Result<()> {\n if self.is_used {\n api_bail!(\"Collector is already used\");\n }\n let existing_schema = Arc::make_mut(&mut self.schema);\n *existing_schema = try_merge_collector_schemas(existing_schema, schema)?;\n Ok(())\n }\n\n pub fn use_schema(&mut self) -> Arc {\n self.is_used = true;\n self.schema.clone()\n }\n}\n\n#[derive(Debug)]\npub(super) struct DataScopeBuilder {\n pub data: 
StructSchemaBuilder,\n}\n\nimpl DataScopeBuilder {\n pub fn new() -> Self {\n Self {\n data: Default::default(),\n }\n }\n\n pub fn last_field(&self) -> Option<&FieldSchema> {\n self.data.fields.last()\n }\n\n pub fn add_field(\n &mut self,\n name: FieldName,\n value_type: &EnrichedValueType,\n ) -> Result {\n let field_index = self.data.add_field(FieldSchema {\n name,\n value_type: EnrichedValueType::from_alternative(value_type)?,\n })?;\n Ok(AnalyzedOpOutput {\n field_idx: field_index,\n })\n }\n\n pub fn analyze_field_path<'a>(\n &'a self,\n field_path: &'_ FieldPath,\n ) -> Result<(\n AnalyzedLocalFieldReference,\n &'a EnrichedValueType,\n )> {\n let mut indices = Vec::with_capacity(field_path.len());\n let mut struct_schema = &self.data;\n\n let mut i = 0;\n let value_type = loop {\n let field_name = &field_path[i];\n let (field_idx, field) = struct_schema.find_field(field_name).ok_or_else(|| {\n api_error!(\"Field {} not found\", field_path[0..(i + 1)].join(\".\"))\n })?;\n indices.push(field_idx);\n if i + 1 >= field_path.len() {\n break &field.value_type;\n }\n i += 1;\n\n struct_schema = match &field.value_type.typ {\n ValueTypeBuilder::Struct(struct_type) => struct_type,\n _ => {\n api_bail!(\"Field {} is not a struct\", field_path[0..(i + 1)].join(\".\"));\n }\n };\n };\n Ok((\n AnalyzedLocalFieldReference {\n fields_idx: indices,\n },\n value_type,\n ))\n }\n}\n\npub(super) struct AnalyzerContext {\n pub lib_ctx: Arc,\n pub flow_ctx: Arc,\n}\n\n#[derive(Debug, Default)]\npub(super) struct OpScopeStates {\n pub op_output_types: HashMap,\n pub collectors: IndexMap,\n pub sub_scopes: HashMap>,\n}\n\nimpl OpScopeStates {\n pub fn add_collector(\n &mut self,\n collector_name: FieldName,\n schema: CollectorSchema,\n ) -> Result {\n let existing_len = self.collectors.len();\n let idx = match self.collectors.entry(collector_name) {\n indexmap::map::Entry::Occupied(mut entry) => {\n entry.get_mut().merge_schema(&schema)?;\n entry.index()\n }\n 
indexmap::map::Entry::Vacant(entry) => {\n entry.insert(CollectorBuilder::new(Arc::new(schema)));\n existing_len\n }\n };\n Ok(AnalyzedLocalCollectorReference {\n collector_idx: idx as u32,\n })\n }\n\n pub fn consume_collector(\n &mut self,\n collector_name: &FieldName,\n ) -> Result<(AnalyzedLocalCollectorReference, Arc)> {\n let (collector_idx, _, collector) = self\n .collectors\n .get_full_mut(collector_name)\n .ok_or_else(|| api_error!(\"Collector not found: {}\", collector_name))?;\n Ok((\n AnalyzedLocalCollectorReference {\n collector_idx: collector_idx as u32,\n },\n collector.use_schema(),\n ))\n }\n\n fn build_op_scope_schema(&self) -> OpScopeSchema {\n OpScopeSchema {\n op_output_types: self\n .op_output_types\n .iter()\n .map(|(name, value_type)| (name.clone(), value_type.without_attrs()))\n .collect(),\n collectors: self\n .collectors\n .iter()\n .map(|(name, schema)| NamedSpec {\n name: name.clone(),\n spec: schema.schema.clone(),\n })\n .collect(),\n op_scopes: self.sub_scopes.clone(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct OpScope {\n pub name: String,\n pub parent: Option<(Arc, spec::FieldPath)>,\n pub(super) data: Arc>,\n pub(super) states: Mutex,\n}\n\nstruct Iter<'a>(Option<&'a OpScope>);\n\nimpl<'a> Iterator for Iter<'a> {\n type Item = &'a OpScope;\n\n fn next(&mut self) -> Option {\n match self.0 {\n Some(scope) => {\n self.0 = scope.parent.as_ref().map(|(parent, _)| parent.as_ref());\n Some(scope)\n }\n None => None,\n }\n }\n}\n\nimpl OpScope {\n pub(super) fn new(\n name: String,\n parent: Option<(Arc, spec::FieldPath)>,\n data: Arc>,\n ) -> Arc {\n Arc::new(Self {\n name,\n parent,\n data,\n states: Mutex::default(),\n })\n }\n\n fn add_op_output(\n &self,\n name: FieldName,\n value_type: EnrichedValueType,\n ) -> Result {\n let op_output = self\n .data\n .lock()\n .unwrap()\n .add_field(name.clone(), &value_type)?;\n self.states\n .lock()\n .unwrap()\n .op_output_types\n .insert(name, value_type);\n Ok(op_output)\n }\n\n pub fn 
ancestors(&self) -> impl Iterator {\n Iter(Some(self))\n }\n\n pub fn is_op_scope_descendant(&self, other: &Self) -> bool {\n if self == other {\n return true;\n }\n match &self.parent {\n Some((parent, _)) => parent.is_op_scope_descendant(other),\n None => false,\n }\n }\n\n pub(super) fn new_foreach_op_scope(\n self: &Arc,\n scope_name: String,\n field_path: &FieldPath,\n ) -> Result<(AnalyzedLocalFieldReference, Arc)> {\n let (local_field_ref, sub_data_scope) = {\n let data_scope = self.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(field_path)?;\n let sub_data_scope = match &value_type.typ {\n ValueTypeBuilder::Table(table_type) => table_type.sub_scope.clone(),\n _ => api_bail!(\"ForEach only works on collection, field {field_path} is not\"),\n };\n (local_field_ref, sub_data_scope)\n };\n let sub_op_scope = OpScope::new(\n scope_name,\n Some((self.clone(), field_path.clone())),\n sub_data_scope,\n );\n Ok((local_field_ref, sub_op_scope))\n }\n}\n\nimpl std::fmt::Display for OpScope {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n if let Some((scope, field_path)) = &self.parent {\n write!(f, \"{} [{} AS {}]\", scope, field_path, self.name)?;\n } else {\n write!(f, \"[{}]\", self.name)?;\n }\n Ok(())\n }\n}\n\nimpl PartialEq for OpScope {\n fn eq(&self, other: &Self) -> bool {\n std::ptr::eq(self, other)\n }\n}\nimpl Eq for OpScope {}\n\nfn find_scope<'a>(scope_name: &ScopeName, op_scope: &'a OpScope) -> Result<(u32, &'a OpScope)> {\n let (up_level, scope) = op_scope\n .ancestors()\n .enumerate()\n .find(|(_, s)| &s.name == scope_name)\n .ok_or_else(|| api_error!(\"Scope not found: {}\", scope_name))?;\n Ok((up_level as u32, scope))\n}\n\nfn analyze_struct_mapping(\n mapping: &StructMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedStructMapping, Vec)> {\n let mut field_mappings = Vec::with_capacity(mapping.fields.len());\n let mut field_schemas = Vec::with_capacity(mapping.fields.len());\n 
for field in mapping.fields.iter() {\n let (field_mapping, value_type) = analyze_value_mapping(&field.spec, op_scope)?;\n field_mappings.push(field_mapping);\n field_schemas.push(FieldSchema {\n name: field.name.clone(),\n value_type,\n });\n }\n Ok((\n AnalyzedStructMapping {\n fields: field_mappings,\n },\n field_schemas,\n ))\n}\n\nfn analyze_value_mapping(\n value_mapping: &ValueMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedValueMapping, EnrichedValueType)> {\n let result = match value_mapping {\n ValueMapping::Constant(v) => {\n let value = value::Value::from_json(v.value.clone(), &v.schema.typ)?;\n (AnalyzedValueMapping::Constant { value }, v.schema.clone())\n }\n\n ValueMapping::Field(v) => {\n let (scope_up_level, op_scope) = match &v.scope {\n Some(scope_name) => find_scope(scope_name, op_scope)?,\n None => (0, op_scope),\n };\n let data_scope = op_scope.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(&v.field_path)?;\n (\n AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: local_field_ref,\n scope_up_level,\n }),\n EnrichedValueType::from_alternative(value_type)?,\n )\n }\n\n ValueMapping::Struct(v) => {\n let (struct_mapping, field_schemas) = analyze_struct_mapping(v, op_scope)?;\n (\n AnalyzedValueMapping::Struct(struct_mapping),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(field_schemas),\n description: None,\n }),\n nullable: false,\n attrs: Default::default(),\n },\n )\n }\n };\n Ok(result)\n}\n\nfn analyze_input_fields(\n arg_bindings: &[OpArgBinding],\n op_scope: &OpScope,\n) -> Result> {\n let mut input_field_schemas = Vec::with_capacity(arg_bindings.len());\n for arg_binding in arg_bindings.iter() {\n let (analyzed_value, value_type) = analyze_value_mapping(&arg_binding.value, op_scope)?;\n input_field_schemas.push(OpArgSchema {\n name: arg_binding.arg_name.clone(),\n value_type,\n analyzed_value: analyzed_value.clone(),\n });\n }\n 
Ok(input_field_schemas)\n}\n\nfn add_collector(\n scope_name: &ScopeName,\n collector_name: FieldName,\n schema: CollectorSchema,\n op_scope: &OpScope,\n) -> Result {\n let (scope_up_level, scope) = find_scope(scope_name, op_scope)?;\n let local_ref = scope\n .states\n .lock()\n .unwrap()\n .add_collector(collector_name, schema)?;\n Ok(AnalyzedCollectorReference {\n local: local_ref,\n scope_up_level,\n })\n}\n\nstruct ExportDataFieldsInfo {\n local_collector_ref: AnalyzedLocalCollectorReference,\n primary_key_def: AnalyzedPrimaryKeyDef,\n primary_key_type: ValueType,\n value_fields_idx: Vec,\n value_stable: bool,\n}\n\nimpl AnalyzerContext {\n pub(super) async fn analyze_import_op(\n &self,\n op_scope: &Arc,\n import_op: NamedSpec,\n ) -> Result> + Send + use<>> {\n let source_factory = match get_executor_factory(&import_op.spec.source.kind)? {\n ExecutorFactory::Source(source_executor) => source_executor,\n _ => {\n return Err(anyhow::anyhow!(\n \"`{}` is not a source op\",\n import_op.spec.source.kind\n ));\n }\n };\n let (output_type, executor) = source_factory\n .build(\n serde_json::Value::Object(import_op.spec.source.spec),\n self.flow_ctx.clone(),\n )\n .await?;\n\n let op_name = import_op.name.clone();\n let primary_key_type = output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key: {op_name}\"))?\n .typ\n .clone();\n let output = op_scope.add_op_output(import_op.name, output_type)?;\n\n let concur_control_options = import_op\n .spec\n .execution_options\n .get_concur_control_options();\n let global_concurrency_controller = self.lib_ctx.global_concurrency_controller.clone();\n let result_fut = async move {\n trace!(\"Start building executor for source op `{op_name}`\");\n let executor = executor.await?;\n trace!(\"Finished building executor for source op `{op_name}`\");\n Ok(AnalyzedImportOp {\n executor,\n output,\n primary_key_type,\n name: op_name,\n refresh_options: import_op.spec.refresh_options,\n 
concurrency_controller: concur_control::CombinedConcurrencyController::new(\n &concur_control_options,\n global_concurrency_controller,\n ),\n })\n };\n Ok(result_fut)\n }\n\n pub(super) async fn analyze_reactive_op(\n &self,\n op_scope: &Arc,\n reactive_op: &NamedSpec,\n ) -> Result>> {\n let result_fut = match &reactive_op.spec {\n ReactiveOpSpec::Transform(op) => {\n let input_field_schemas =\n analyze_input_fields(&op.inputs, op_scope).with_context(|| {\n format!(\n \"Failed to analyze inputs for transform op: {}\",\n reactive_op.name\n )\n })?;\n let spec = serde_json::Value::Object(op.op.spec.clone());\n\n match get_executor_factory(&op.op.kind)? {\n ExecutorFactory::SimpleFunction(fn_executor) => {\n let input_value_mappings = input_field_schemas\n .iter()\n .map(|field| field.analyzed_value.clone())\n .collect();\n let (output_enriched_type, executor) = fn_executor\n .build(spec, input_field_schemas, self.flow_ctx.clone())\n .await?;\n let logic_fingerprinter = Fingerprinter::default()\n .with(&op.op)?\n .with(&output_enriched_type.without_attrs())?;\n let output_type = output_enriched_type.typ.clone();\n let output = op_scope\n .add_op_output(reactive_op.name.clone(), output_enriched_type)?;\n let op_name = reactive_op.name.clone();\n async move {\n trace!(\"Start building executor for transform op `{op_name}`\");\n let executor = executor.await.with_context(|| {\n format!(\"Failed to build executor for transform op: {op_name}\")\n })?;\n let enable_cache = executor.enable_cache();\n let behavior_version = executor.behavior_version();\n trace!(\"Finished building executor for transform op `{op_name}`, enable cache: {enable_cache}, behavior version: {behavior_version:?}\");\n let function_exec_info = AnalyzedFunctionExecInfo {\n enable_cache,\n behavior_version,\n fingerprinter: logic_fingerprinter\n .with(&behavior_version)?,\n output_type\n };\n if function_exec_info.enable_cache\n && function_exec_info.behavior_version.is_none()\n {\n api_bail!(\n \"When 
caching is enabled, behavior version must be specified for transform op: {op_name}\"\n );\n }\n Ok(AnalyzedReactiveOp::Transform(AnalyzedTransformOp {\n name: op_name,\n inputs: input_value_mappings,\n function_exec_info,\n executor,\n output,\n }))\n }\n .boxed()\n }\n _ => api_bail!(\"`{}` is not a function op\", op.op.kind),\n }\n }\n\n ReactiveOpSpec::ForEach(foreach_op) => {\n let (local_field_ref, sub_op_scope) = op_scope.new_foreach_op_scope(\n foreach_op.op_scope.name.clone(),\n &foreach_op.field_path,\n )?;\n let analyzed_op_scope_fut = {\n let analyzed_op_scope_fut = self\n .analyze_op_scope(&sub_op_scope, &foreach_op.op_scope.ops)\n .boxed_local()\n .await?;\n let sub_op_scope_schema =\n sub_op_scope.states.lock().unwrap().build_op_scope_schema();\n op_scope.states.lock().unwrap().sub_scopes.insert(\n foreach_op.op_scope.name.clone(),\n Arc::new(sub_op_scope_schema),\n );\n analyzed_op_scope_fut\n };\n let op_name = reactive_op.name.clone();\n\n let concur_control_options =\n foreach_op.execution_options.get_concur_control_options();\n async move {\n Ok(AnalyzedReactiveOp::ForEach(AnalyzedForEachOp {\n local_field_ref,\n op_scope: analyzed_op_scope_fut\n .await\n .with_context(|| format!(\"Analyzing foreach op: {op_name}\"))?,\n name: op_name,\n concurrency_controller: concur_control::ConcurrencyController::new(\n &concur_control_options,\n ),\n }))\n }\n .boxed()\n }\n\n ReactiveOpSpec::Collect(op) => {\n let (struct_mapping, fields_schema) = analyze_struct_mapping(&op.input, op_scope)?;\n let has_auto_uuid_field = op.auto_uuid_field.is_some();\n let fingerprinter = Fingerprinter::default().with(&fields_schema)?;\n let collect_op = AnalyzedReactiveOp::Collect(AnalyzedCollectOp {\n name: reactive_op.name.clone(),\n has_auto_uuid_field,\n input: struct_mapping,\n collector_ref: add_collector(\n &op.scope_name,\n op.collector_name.clone(),\n CollectorSchema::from_fields(fields_schema, op.auto_uuid_field.clone()),\n op_scope,\n )?,\n fingerprinter,\n });\n 
async move { Ok(collect_op) }.boxed()\n }\n };\n Ok(result_fut)\n }\n\n #[allow(clippy::too_many_arguments)]\n async fn analyze_export_op_group(\n &self,\n target_kind: &str,\n op_scope: &Arc,\n flow_inst: &FlowInstanceSpec,\n export_op_group: &AnalyzedExportTargetOpGroup,\n declarations: Vec,\n targets_analyzed_ss: &mut [Option],\n declarations_analyzed_ss: &mut Vec,\n ) -> Result> + Send + use<>>> {\n let mut collection_specs = Vec::::new();\n let mut data_fields_infos = Vec::::new();\n for idx in export_op_group.op_idx.iter() {\n let export_op = &flow_inst.export_ops[*idx];\n let (local_collector_ref, collector_schema) = op_scope\n .states\n .lock()\n .unwrap()\n .consume_collector(&export_op.spec.collector_name)?;\n let (key_fields_schema, value_fields_schema, data_collection_info) =\n match &export_op.spec.index_options.primary_key_fields {\n Some(fields) => {\n let pk_fields_idx = fields\n .iter()\n .map(|f| {\n collector_schema\n .fields\n .iter()\n .position(|field| &field.name == f)\n .ok_or_else(|| anyhow!(\"field not found: {}\", f))\n })\n .collect::>>()?;\n\n let key_fields_schema = pk_fields_idx\n .iter()\n .map(|idx| collector_schema.fields[*idx].clone())\n .collect::>();\n let primary_key_type = if pk_fields_idx.len() == 1 {\n key_fields_schema[0].value_type.typ.clone()\n } else {\n ValueType::Struct(StructSchema {\n fields: Arc::from(key_fields_schema.clone()),\n description: None,\n })\n };\n let mut value_fields_schema: Vec = vec![];\n let mut value_fields_idx = vec![];\n for (idx, field) in collector_schema.fields.iter().enumerate() {\n if !pk_fields_idx.contains(&idx) {\n value_fields_schema.push(field.clone());\n value_fields_idx.push(idx as u32);\n }\n }\n let value_stable = collector_schema\n .auto_uuid_field_idx\n .as_ref()\n .map(|uuid_idx| pk_fields_idx.contains(uuid_idx))\n .unwrap_or(false);\n (\n key_fields_schema,\n value_fields_schema,\n ExportDataFieldsInfo {\n local_collector_ref,\n primary_key_def: 
AnalyzedPrimaryKeyDef::Fields(pk_fields_idx),\n primary_key_type,\n value_fields_idx,\n value_stable,\n },\n )\n }\n None => {\n // TODO: Support auto-generate primary key\n api_bail!(\"Primary key fields must be specified\")\n }\n };\n collection_specs.push(interface::ExportDataCollectionSpec {\n name: export_op.name.clone(),\n spec: serde_json::Value::Object(export_op.spec.target.spec.clone()),\n key_fields_schema,\n value_fields_schema,\n index_options: export_op.spec.index_options.clone(),\n });\n data_fields_infos.push(data_collection_info);\n }\n let (data_collections_output, declarations_output) = export_op_group\n .target_factory\n .clone()\n .build(collection_specs, declarations, self.flow_ctx.clone())\n .await?;\n let analyzed_export_ops = export_op_group\n .op_idx\n .iter()\n .zip(data_collections_output.into_iter())\n .zip(data_fields_infos.into_iter())\n .map(|((idx, data_coll_output), data_fields_info)| {\n let export_op = &flow_inst.export_ops[*idx];\n let op_name = export_op.name.clone();\n let export_target_factory = export_op_group.target_factory.clone();\n\n let export_op_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key: data_coll_output.setup_key,\n desired_setup_state: data_coll_output.desired_setup_state,\n setup_by_user: export_op.spec.setup_by_user,\n };\n targets_analyzed_ss[*idx] = Some(export_op_ss);\n\n Ok(async move {\n trace!(\"Start building executor for export op `{op_name}`\");\n let export_context = data_coll_output\n .export_context\n .await\n .with_context(|| format!(\"Analyzing export op: {op_name}\"))?;\n trace!(\"Finished building executor for export op `{op_name}`\");\n Ok(AnalyzedExportOp {\n name: op_name,\n input: data_fields_info.local_collector_ref,\n export_target_factory,\n export_context,\n primary_key_def: data_fields_info.primary_key_def,\n primary_key_type: data_fields_info.primary_key_type,\n value_fields: data_fields_info.value_fields_idx,\n value_stable: 
data_fields_info.value_stable,\n })\n })\n })\n .collect::>>()?;\n for (setup_key, desired_setup_state) in declarations_output {\n let decl_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key,\n desired_setup_state,\n setup_by_user: false,\n };\n declarations_analyzed_ss.push(decl_ss);\n }\n Ok(analyzed_export_ops)\n }\n\n async fn analyze_op_scope(\n &self,\n op_scope: &Arc,\n reactive_ops: &[NamedSpec],\n ) -> Result> + Send + use<>> {\n let mut op_futs = Vec::with_capacity(reactive_ops.len());\n for reactive_op in reactive_ops.iter() {\n op_futs.push(self.analyze_reactive_op(op_scope, reactive_op).await?);\n }\n let collector_len = op_scope.states.lock().unwrap().collectors.len();\n let result_fut = async move {\n Ok(AnalyzedOpScope {\n reactive_ops: try_join_all(op_futs).await?,\n collector_len,\n })\n };\n Ok(result_fut)\n }\n}\n\npub fn build_flow_instance_context(\n flow_inst_name: &str,\n py_exec_ctx: Option,\n) -> Arc {\n Arc::new(FlowInstanceContext {\n flow_instance_name: flow_inst_name.to_string(),\n auth_registry: get_auth_registry().clone(),\n py_exec_ctx: py_exec_ctx.map(Arc::new),\n })\n}\n\nfn build_flow_schema(root_op_scope: &OpScope) -> Result {\n let schema = (&root_op_scope.data.lock().unwrap().data).try_into()?;\n let root_op_scope_schema = root_op_scope.states.lock().unwrap().build_op_scope_schema();\n Ok(FlowSchema {\n schema,\n root_op_scope: root_op_scope_schema,\n })\n}\n\npub async fn analyze_flow(\n flow_inst: &FlowInstanceSpec,\n flow_ctx: Arc,\n) -> Result<(\n FlowSchema,\n AnalyzedSetupState,\n impl Future> + Send + use<>,\n)> {\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let root_data_scope = Arc::new(Mutex::new(DataScopeBuilder::new()));\n let root_op_scope = OpScope::new(ROOT_SCOPE_NAME.to_string(), None, root_data_scope);\n let mut import_ops_futs = Vec::with_capacity(flow_inst.import_ops.len());\n for import_op in flow_inst.import_ops.iter() 
{\n import_ops_futs.push(\n analyzer_ctx\n .analyze_import_op(&root_op_scope, import_op.clone())\n .await?,\n );\n }\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n\n #[derive(Default)]\n struct TargetOpGroup {\n export_op_ids: Vec,\n declarations: Vec,\n }\n let mut target_op_group = IndexMap::::new();\n for (idx, export_op) in flow_inst.export_ops.iter().enumerate() {\n target_op_group\n .entry(export_op.spec.target.kind.clone())\n .or_default()\n .export_op_ids\n .push(idx);\n }\n for declaration in flow_inst.declarations.iter() {\n target_op_group\n .entry(declaration.kind.clone())\n .or_default()\n .declarations\n .push(serde_json::Value::Object(declaration.spec.clone()));\n }\n\n let mut export_ops_futs = vec![];\n let mut analyzed_target_op_groups = vec![];\n\n let mut targets_analyzed_ss = Vec::with_capacity(flow_inst.export_ops.len());\n targets_analyzed_ss.resize_with(flow_inst.export_ops.len(), || None);\n\n let mut declarations_analyzed_ss = Vec::with_capacity(flow_inst.declarations.len());\n\n for (target_kind, op_ids) in target_op_group.into_iter() {\n let target_factory = match get_executor_factory(&target_kind)? 
{\n ExecutorFactory::ExportTarget(export_executor) => export_executor,\n _ => api_bail!(\"`{}` is not a export target op\", target_kind),\n };\n let analyzed_target_op_group = AnalyzedExportTargetOpGroup {\n target_factory,\n op_idx: op_ids.export_op_ids,\n };\n export_ops_futs.extend(\n analyzer_ctx\n .analyze_export_op_group(\n target_kind.as_str(),\n &root_op_scope,\n flow_inst,\n &analyzed_target_op_group,\n op_ids.declarations,\n &mut targets_analyzed_ss,\n &mut declarations_analyzed_ss,\n )\n .await?,\n );\n analyzed_target_op_groups.push(analyzed_target_op_group);\n }\n\n let flow_schema = build_flow_schema(&root_op_scope)?;\n let analyzed_ss = exec_ctx::AnalyzedSetupState {\n targets: targets_analyzed_ss\n .into_iter()\n .enumerate()\n .map(|(idx, v)| v.ok_or_else(|| anyhow!(\"target op `{}` not found\", idx)))\n .collect::>>()?,\n declarations: declarations_analyzed_ss,\n };\n\n let logic_fingerprint = Fingerprinter::default()\n .with(&flow_inst)?\n .with(&flow_schema.schema)?\n .into_fingerprint();\n let plan_fut = async move {\n let (import_ops, op_scope, export_ops) = try_join3(\n try_join_all(import_ops_futs),\n op_scope_fut,\n try_join_all(export_ops_futs),\n )\n .await?;\n\n Ok(ExecutionPlan {\n logic_fingerprint,\n import_ops,\n op_scope,\n export_ops,\n export_op_groups: analyzed_target_op_groups,\n })\n };\n\n Ok((flow_schema, analyzed_ss, plan_fut))\n}\n\npub async fn analyze_transient_flow<'a>(\n flow_inst: &TransientFlowSpec,\n flow_ctx: Arc,\n) -> Result<(\n EnrichedValueType,\n FlowSchema,\n impl Future> + Send + 'a,\n)> {\n let mut root_data_scope = DataScopeBuilder::new();\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let mut input_fields = vec![];\n for field in flow_inst.input_fields.iter() {\n let analyzed_field = root_data_scope.add_field(field.name.clone(), &field.value_type)?;\n input_fields.push(analyzed_field);\n }\n let root_op_scope = OpScope::new(\n ROOT_SCOPE_NAME.to_string(),\n None,\n 
Arc::new(Mutex::new(root_data_scope)),\n );\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n let (output_value, output_type) =\n analyze_value_mapping(&flow_inst.output_value, &root_op_scope)?;\n let data_schema = build_flow_schema(&root_op_scope)?;\n let plan_fut = async move {\n let op_scope = op_scope_fut.await?;\n Ok(TransientExecutionPlan {\n input_fields,\n op_scope,\n output_value,\n })\n };\n Ok((output_type, data_schema, plan_fut))\n}\n"], ["/cocoindex/src/ops/targets/neo4j.rs", "use crate::prelude::*;\n\nuse super::shared::property_graph::*;\n\nuse crate::setup::components::{self, State, apply_component_changes};\nuse crate::setup::{ResourceSetupStatus, SetupChangeType};\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nuse indoc::formatdoc;\nuse neo4rs::{BoltType, ConfigBuilder, Graph};\nuse std::fmt::Write;\nuse tokio::sync::OnceCell;\n\nconst DEFAULT_DB: &str = \"neo4j\";\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n uri: String,\n user: String,\n password: String,\n db: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\ntype Neo4jGraphElement = GraphElementType;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct GraphKey {\n uri: String,\n db: String,\n}\n\nimpl GraphKey {\n fn from_spec(spec: &ConnectionSpec) -> Self {\n Self {\n uri: spec.uri.clone(),\n db: spec.db.clone().unwrap_or_else(|| DEFAULT_DB.to_string()),\n }\n }\n}\n\nimpl retryable::IsRetryable for neo4rs::Error {\n fn is_retryable(&self) -> bool {\n match self {\n neo4rs::Error::ConnectionError => true,\n neo4rs::Error::Neo4j(e) => e.kind() == neo4rs::Neo4jErrorKind::Transient,\n _ => false,\n }\n }\n}\n\n#[derive(Default)]\npub struct 
GraphPool {\n graphs: Mutex>>>>,\n}\n\nimpl GraphPool {\n async fn get_graph(&self, spec: &ConnectionSpec) -> Result> {\n let graph_key = GraphKey::from_spec(spec);\n let cell = {\n let mut graphs = self.graphs.lock().unwrap();\n graphs.entry(graph_key).or_default().clone()\n };\n let graph = cell\n .get_or_try_init(|| async {\n let mut config_builder = ConfigBuilder::default()\n .uri(spec.uri.clone())\n .user(spec.user.clone())\n .password(spec.password.clone());\n if let Some(db) = &spec.db {\n config_builder = config_builder.db(db.clone());\n }\n anyhow::Ok(Arc::new(Graph::connect(config_builder.build()?).await?))\n })\n .await?;\n Ok(graph.clone())\n }\n\n async fn get_graph_for_key(\n &self,\n key: &Neo4jGraphElement,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let spec = auth_registry.get::(&key.connection)?;\n self.get_graph(&spec).await\n }\n}\n\npub struct ExportContext {\n connection_ref: AuthEntryReference,\n graph: Arc,\n\n create_order: u8,\n\n delete_cypher: String,\n insert_cypher: String,\n delete_before_upsert: bool,\n\n analyzed_data_coll: AnalyzedDataCollection,\n\n key_field_params: Vec,\n src_key_field_params: Vec,\n tgt_key_field_params: Vec,\n}\n\nfn json_value_to_bolt_value(value: &serde_json::Value) -> Result {\n let bolt_value = match value {\n serde_json::Value::Null => BoltType::Null(neo4rs::BoltNull),\n serde_json::Value::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n serde_json::Value::Number(v) => {\n if let Some(i) = v.as_i64() {\n BoltType::Integer(neo4rs::BoltInteger::new(i))\n } else if let Some(f) = v.as_f64() {\n BoltType::Float(neo4rs::BoltFloat::new(f))\n } else {\n anyhow::bail!(\"Unsupported JSON number: {}\", v)\n }\n }\n serde_json::Value::String(v) => BoltType::String(neo4rs::BoltString::new(v)),\n serde_json::Value::Array(v) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(json_value_to_bolt_value)\n .collect::>()?,\n }),\n serde_json::Value::Object(v) => 
BoltType::Map(neo4rs::BoltMap {\n value: v\n .into_iter()\n .map(|(k, v)| Ok((neo4rs::BoltString::new(k), json_value_to_bolt_value(v)?)))\n .collect::>()?,\n }),\n };\n Ok(bolt_value)\n}\n\nfn key_to_bolt(key: &KeyValue, schema: &schema::ValueType) -> Result {\n value_to_bolt(&key.into(), schema)\n}\n\nfn field_values_to_bolt<'a>(\n field_values: impl IntoIterator,\n schema: impl IntoIterator,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(schema, field_values)\n .map(|(schema, value)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(value, &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn mapped_field_values_to_bolt(\n fields_schema: &[schema::FieldSchema],\n fields_input_idx: &[usize],\n field_values: &FieldValues,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(fields_schema.iter(), fields_input_idx.iter())\n .map(|(schema, field_idx)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(&field_values.fields[*field_idx], &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn basic_value_to_bolt(value: &BasicValue, schema: &BasicValueType) -> Result {\n let bolt_value = match value {\n BasicValue::Bytes(v) => {\n BoltType::Bytes(neo4rs::BoltBytes::new(bytes::Bytes::from_owner(v.clone())))\n }\n BasicValue::Str(v) => BoltType::String(neo4rs::BoltString::new(v)),\n BasicValue::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n BasicValue::Int64(v) => BoltType::Integer(neo4rs::BoltInteger::new(*v)),\n BasicValue::Float64(v) => BoltType::Float(neo4rs::BoltFloat::new(*v)),\n BasicValue::Float32(v) => BoltType::Float(neo4rs::BoltFloat::new(*v as f64)),\n BasicValue::Range(v) => BoltType::List(neo4rs::BoltList {\n value: [\n BoltType::Integer(neo4rs::BoltInteger::new(v.start as i64)),\n BoltType::Integer(neo4rs::BoltInteger::new(v.end as i64)),\n ]\n .into(),\n }),\n 
BasicValue::Uuid(v) => BoltType::String(neo4rs::BoltString::new(&v.to_string())),\n BasicValue::Date(v) => BoltType::Date(neo4rs::BoltDate::from(*v)),\n BasicValue::Time(v) => BoltType::LocalTime(neo4rs::BoltLocalTime::from(*v)),\n BasicValue::LocalDateTime(v) => {\n BoltType::LocalDateTime(neo4rs::BoltLocalDateTime::from(*v))\n }\n BasicValue::OffsetDateTime(v) => BoltType::DateTime(neo4rs::BoltDateTime::from(*v)),\n BasicValue::TimeDelta(v) => BoltType::Duration(neo4rs::BoltDuration::new(\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger {\n value: v.num_seconds(),\n },\n v.subsec_nanos().into(),\n )),\n BasicValue::Vector(v) => match schema {\n BasicValueType::Vector(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| basic_value_to_bolt(v, &t.element_type))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-vector type got vector value: {}\", schema),\n },\n BasicValue::Json(v) => json_value_to_bolt_value(v)?,\n BasicValue::UnionVariant { tag_id, value } => match schema {\n BasicValueType::Union(s) => {\n let typ = s\n .types\n .get(*tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"Invalid `tag_id`: {}\", tag_id))?;\n\n basic_value_to_bolt(value, typ)?\n }\n _ => anyhow::bail!(\"Non-union type got union value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nfn value_to_bolt(value: &Value, schema: &schema::ValueType) -> Result {\n let bolt_value = match value {\n Value::Null => BoltType::Null(neo4rs::BoltNull),\n Value::Basic(v) => match schema {\n ValueType::Basic(t) => basic_value_to_bolt(v, t)?,\n _ => anyhow::bail!(\"Non-basic type got basic value: {}\", schema),\n },\n Value::Struct(v) => match schema {\n ValueType::Struct(t) => field_values_to_bolt(v.fields.iter(), t.fields.iter())?,\n _ => anyhow::bail!(\"Non-struct type got struct value: {}\", schema),\n },\n Value::UTable(v) | Value::LTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n 
.map(|v| field_values_to_bolt(v.0.fields.iter(), t.row.fields.iter()))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n Value::KTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|(k, v)| {\n field_values_to_bolt(\n std::iter::once(&Into::::into(k.clone()))\n .chain(v.0.fields.iter()),\n t.row.fields.iter(),\n )\n })\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nconst CORE_KEY_PARAM_PREFIX: &str = \"key\";\nconst CORE_PROPS_PARAM: &str = \"props\";\nconst SRC_KEY_PARAM_PREFIX: &str = \"source_key\";\nconst SRC_PROPS_PARAM: &str = \"source_props\";\nconst TGT_KEY_PARAM_PREFIX: &str = \"target_key\";\nconst TGT_PROPS_PARAM: &str = \"target_props\";\nconst CORE_ELEMENT_MATCHER_VAR: &str = \"e\";\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\nimpl ExportContext {\n fn build_key_field_params_n_literal<'a>(\n param_prefix: &str,\n key_fields: impl Iterator,\n ) -> (Vec, String) {\n let (params, items): (Vec, Vec) = key_fields\n .into_iter()\n .enumerate()\n .map(|(i, name)| {\n let param = format!(\"{param_prefix}_{i}\");\n let item = format!(\"{name}: ${param}\");\n (param, item)\n })\n .unzip();\n (params, format!(\"{{{}}}\", items.into_iter().join(\", \")))\n }\n\n fn new(\n graph: Arc,\n spec: Spec,\n analyzed_data_coll: AnalyzedDataCollection,\n ) -> Result {\n let (key_field_params, key_fields_literal) = Self::build_key_field_params_n_literal(\n CORE_KEY_PARAM_PREFIX,\n analyzed_data_coll.schema.key_fields.iter().map(|f| &f.name),\n );\n let result = match spec.mapping {\n GraphElementMapping::Node(node_spec) => {\n let delete_cypher = formatdoc! 
{\"\n OPTIONAL MATCH (old_node:{label} {key_fields_literal})\n WITH old_node\n SET old_node.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n WITH old_node\n WHERE NOT (old_node)--()\n DELETE old_node\n FINISH\n \",\n label = node_spec.label,\n };\n\n let insert_cypher = formatdoc! {\"\n MERGE (new_node:{label} {key_fields_literal})\n SET new_node.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE{optional_set_props}\n FINISH\n \",\n label = node_spec.label,\n optional_set_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\", new_node += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 0,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: false,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params: vec![],\n tgt_key_field_params: vec![],\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_src)-[old_rel:{rel_type} {key_fields_literal}]->(old_tgt)\n\n DELETE old_rel\n\n WITH collect(old_src) + collect(old_tgt) AS nodes_to_check\n UNWIND nodes_to_check AS node\n WITH DISTINCT node\n WHERE NOT COALESCE(node.{SELF_CONTAINED_TAG_FIELD_NAME}, FALSE)\n AND COUNT{{ (node)--() }} = 0\n DELETE node\n\n FINISH\n \",\n rel_type = rel_spec.rel_type,\n };\n\n let analyzed_rel = analyzed_data_coll\n .rel\n .as_ref()\n .ok_or_else(invariance_violation)?;\n let analyzed_src = &analyzed_rel.source;\n let analyzed_tgt = &analyzed_rel.target;\n\n let (src_key_field_params, src_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n SRC_KEY_PARAM_PREFIX,\n analyzed_src.schema.key_fields.iter().map(|f| &f.name),\n );\n let (tgt_key_field_params, tgt_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n TGT_KEY_PARAM_PREFIX,\n analyzed_tgt.schema.key_fields.iter().map(|f| &f.name),\n );\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_src:{src_node_label} {src_key_fields_literal})\n {optional_set_src_props}\n\n MERGE (new_tgt:{tgt_node_label} {tgt_key_fields_literal})\n {optional_set_tgt_props}\n\n MERGE (new_src)-[new_rel:{rel_type} {key_fields_literal}]->(new_tgt)\n {optional_set_rel_props}\n\n FINISH\n \",\n src_node_label = rel_spec.source.label,\n optional_set_src_props = if analyzed_src.has_value_fields() {\n format!(\"SET new_src += ${SRC_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n tgt_node_label = rel_spec.target.label,\n optional_set_tgt_props = if analyzed_tgt.has_value_fields() {\n format!(\"SET new_tgt += ${TGT_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n rel_type = rel_spec.rel_type,\n optional_set_rel_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\"SET new_rel += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 1,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: true,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params,\n tgt_key_field_params,\n }\n }\n };\n Ok(result)\n }\n\n fn bind_key_field_params<'a>(\n query: neo4rs::Query,\n params: &[String],\n type_val: impl Iterator,\n ) -> Result {\n let mut query = query;\n for (i, (typ, val)) in type_val.enumerate() {\n query = query.param(¶ms[i], value_to_bolt(val, typ)?);\n }\n Ok(query)\n }\n\n fn bind_rel_key_field_params(\n &self,\n query: neo4rs::Query,\n val: &KeyValue,\n ) -> Result {\n let mut query = query;\n for (i, val) in val\n .fields_iter(self.analyzed_data_coll.schema.key_fields.len())?\n .enumerate()\n {\n query = query.param(\n &self.key_field_params[i],\n key_to_bolt(\n val,\n &self.analyzed_data_coll.schema.key_fields[i].value_type.typ,\n )?,\n );\n }\n Ok(query)\n }\n\n fn add_upsert_queries(\n &self,\n upsert: &ExportTargetUpsertEntry,\n queries: &mut Vec,\n ) -> Result<()> {\n if self.delete_before_upsert {\n queries.push(\n 
self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), &upsert.key)?,\n );\n }\n\n let value = &upsert.value;\n let mut query =\n self.bind_rel_key_field_params(neo4rs::query(&self.insert_cypher), &upsert.key)?;\n\n if let Some(analyzed_rel) = &self.analyzed_data_coll.rel {\n let bind_params = |query: neo4rs::Query,\n analyzed: &AnalyzedGraphElementFieldMapping,\n key_field_params: &[String]|\n -> Result {\n let mut query = Self::bind_key_field_params(\n query,\n key_field_params,\n std::iter::zip(\n analyzed.schema.key_fields.iter(),\n analyzed.fields_input_idx.key.iter(),\n )\n .map(|(f, field_idx)| (&f.value_type.typ, &value.fields[*field_idx])),\n )?;\n if analyzed.has_value_fields() {\n query = query.param(\n SRC_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &analyzed.schema.value_fields,\n &analyzed.fields_input_idx.value,\n value,\n )?,\n );\n }\n Ok(query)\n };\n query = bind_params(query, &analyzed_rel.source, &self.src_key_field_params)?;\n query = bind_params(query, &analyzed_rel.target, &self.tgt_key_field_params)?;\n }\n\n if !self.analyzed_data_coll.value_fields_input_idx.is_empty() {\n query = query.param(\n CORE_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &self.analyzed_data_coll.schema.value_fields,\n &self.analyzed_data_coll.value_fields_input_idx,\n value,\n )?,\n );\n }\n queries.push(query);\n Ok(())\n }\n\n fn add_delete_queries(\n &self,\n delete_key: &value::KeyValue,\n queries: &mut Vec,\n ) -> Result<()> {\n queries\n .push(self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), delete_key)?);\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SetupState {\n key_field_names: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n dependent_node_labels: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n sub_components: Vec,\n}\n\nimpl SetupState {\n fn new(\n schema: &GraphElementSchema,\n index_options: &IndexOptions,\n dependent_node_labels: Vec,\n ) -> 
Result {\n let key_field_names: Vec =\n schema.key_fields.iter().map(|f| f.name.clone()).collect();\n let mut sub_components = vec![];\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::KeyConstraint {\n field_names: key_field_names.clone(),\n },\n });\n let value_field_types = schema\n .value_fields\n .iter()\n .map(|f| (f.name.as_str(), &f.value_type.typ))\n .collect::>();\n for index_def in index_options.vector_indexes.iter() {\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::from_vector_index_def(\n index_def,\n value_field_types\n .get(index_def.field_name.as_str())\n .ok_or_else(|| {\n api_error!(\n \"Unknown field name for vector index: {}\",\n index_def.field_name\n )\n })?,\n )?,\n });\n }\n Ok(Self {\n key_field_names,\n dependent_node_labels,\n sub_components,\n })\n }\n\n fn check_compatible(&self, existing: &Self) -> SetupStateCompatibility {\n if self.key_field_names == existing.key_field_names {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n }\n }\n}\n\nimpl IntoIterator for SetupState {\n type Item = ComponentState;\n type IntoIter = std::vec::IntoIter;\n\n fn into_iter(self) -> Self::IntoIter {\n self.sub_components.into_iter()\n }\n}\n#[derive(Debug, Default)]\nstruct DataClearAction {\n dependent_node_labels: Vec,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\nenum ComponentKind {\n KeyConstraint,\n VectorIndex,\n}\n\nimpl ComponentKind {\n fn describe(&self) -> &str {\n match self {\n ComponentKind::KeyConstraint => \"KEY CONSTRAINT\",\n ComponentKind::VectorIndex => \"VECTOR INDEX\",\n }\n }\n}\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ComponentKey {\n kind: ComponentKind,\n name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nenum IndexDef {\n KeyConstraint {\n field_names: Vec,\n },\n VectorIndex {\n field_name: String,\n metric: 
spec::VectorSimilarityMetric,\n vector_size: usize,\n },\n}\n\nimpl IndexDef {\n fn from_vector_index_def(\n index_def: &spec::VectorIndexDef,\n field_typ: &schema::ValueType,\n ) -> Result {\n Ok(Self::VectorIndex {\n field_name: index_def.field_name.clone(),\n vector_size: (match field_typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(schema)) => {\n schema.dimension\n }\n _ => None,\n })\n .ok_or_else(|| {\n api_error!(\"Vector index field must be a vector with fixed dimension\")\n })?,\n metric: index_def.metric,\n })\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\npub struct ComponentState {\n object_label: ElementType,\n index_def: IndexDef,\n}\n\nimpl components::State for ComponentState {\n fn key(&self) -> ComponentKey {\n let prefix = match &self.object_label {\n ElementType::Relationship(_) => \"r\",\n ElementType::Node(_) => \"n\",\n };\n let label = self.object_label.label();\n match &self.index_def {\n IndexDef::KeyConstraint { .. } => ComponentKey {\n kind: ComponentKind::KeyConstraint,\n name: format!(\"{prefix}__{label}__key\"),\n },\n IndexDef::VectorIndex {\n field_name, metric, ..\n } => ComponentKey {\n kind: ComponentKind::VectorIndex,\n name: format!(\"{prefix}__{label}__{field_name}__{metric}__vidx\"),\n },\n }\n }\n}\n\npub struct SetupComponentOperator {\n graph_pool: Arc,\n conn_spec: ConnectionSpec,\n}\n\n#[async_trait]\nimpl components::SetupOperator for SetupComponentOperator {\n type Key = ComponentKey;\n type State = ComponentState;\n type SetupState = SetupState;\n type Context = ();\n\n fn describe_key(&self, key: &Self::Key) -> String {\n format!(\"{} {}\", key.kind.describe(), key.name)\n }\n\n fn describe_state(&self, state: &Self::State) -> String {\n let key_desc = self.describe_key(&state.key());\n let label = state.object_label.label();\n match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n format!(\"{key_desc} ON {label} (key: {})\", field_names.join(\", \"))\n 
}\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n format!(\n \"{key_desc} ON {label} (field_name: {field_name}, vector_size: {vector_size}, metric: {metric})\",\n )\n }\n }\n }\n\n fn is_up_to_date(&self, current: &ComponentState, desired: &ComponentState) -> bool {\n current == desired\n }\n\n async fn create(&self, state: &ComponentState, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let key = state.key();\n let qualifier = CORE_ELEMENT_MATCHER_VAR;\n let matcher = state.object_label.matcher(qualifier);\n let query = neo4rs::query(&match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n let key_type = match &state.object_label {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"RELATIONSHIP\",\n };\n format!(\n \"CREATE CONSTRAINT {name} IF NOT EXISTS FOR {matcher} REQUIRE {field_names} IS {key_type} KEY\",\n name = key.name,\n field_names = build_composite_field_names(qualifier, field_names),\n )\n }\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n formatdoc! 
{\"\n CREATE VECTOR INDEX {name} IF NOT EXISTS\n FOR {matcher} ON {qualifier}.{field_name}\n OPTIONS {{\n indexConfig: {{\n `vector.dimensions`: {vector_size},\n `vector.similarity_function`: '{metric}'\n }}\n }}\",\n name = key.name,\n }\n }\n });\n Ok(graph.run(query).await?)\n }\n\n async fn delete(&self, key: &ComponentKey, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let query = neo4rs::query(&format!(\n \"DROP {kind} {name} IF EXISTS\",\n kind = match key.kind {\n ComponentKind::KeyConstraint => \"CONSTRAINT\",\n ComponentKind::VectorIndex => \"INDEX\",\n },\n name = key.name,\n ));\n Ok(graph.run(query).await?)\n }\n}\n\nfn build_composite_field_names(qualifier: &str, field_names: &[String]) -> String {\n let strs = field_names\n .iter()\n .map(|name| format!(\"{qualifier}.{name}\"))\n .join(\", \");\n if field_names.len() == 1 {\n strs\n } else {\n format!(\"({strs})\")\n }\n}\n#[derive(Debug)]\npub struct GraphElementDataSetupStatus {\n data_clear: Option,\n change_type: SetupChangeType,\n}\n\nimpl GraphElementDataSetupStatus {\n fn new(desired_state: Option<&SetupState>, existing: &CombinedState) -> Self {\n let mut data_clear: Option = None;\n for v in existing.possible_versions() {\n if desired_state.as_ref().is_none_or(|desired| {\n desired.check_compatible(v) == SetupStateCompatibility::NotCompatible\n }) {\n data_clear\n .get_or_insert_default()\n .dependent_node_labels\n .extend(v.dependent_node_labels.iter().cloned());\n }\n }\n\n let change_type = match (desired_state, existing.possible_versions().next()) {\n (Some(_), Some(_)) => {\n if data_clear.is_none() {\n SetupChangeType::NoChange\n } else {\n SetupChangeType::Update\n }\n }\n (Some(_), None) => SetupChangeType::Create,\n (None, Some(_)) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n };\n\n Self {\n data_clear,\n change_type,\n }\n }\n}\n\nimpl ResourceSetupStatus for GraphElementDataSetupStatus {\n 
fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if let Some(data_clear) = &self.data_clear {\n let mut desc = \"Clear data\".to_string();\n if !data_clear.dependent_node_labels.is_empty() {\n write!(\n &mut desc,\n \"; dependents {}\",\n data_clear\n .dependent_node_labels\n .iter()\n .map(|l| format!(\"{}\", ElementType::Node(l.clone())))\n .join(\", \")\n )\n .unwrap();\n }\n result.push(setup::ChangeDescription::Action(desc));\n }\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.change_type\n }\n}\n\nasync fn clear_graph_element_data(\n graph: &Graph,\n key: &Neo4jGraphElement,\n is_self_contained: bool,\n) -> Result<()> {\n let var_name = CORE_ELEMENT_MATCHER_VAR;\n let matcher = key.typ.matcher(var_name);\n let query_string = match key.typ {\n ElementType::Node(_) => {\n let optional_reset_self_contained = if is_self_contained {\n formatdoc! {\"\n WITH {var_name}\n SET {var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n \"}\n } else {\n \"\".to_string()\n };\n formatdoc! {\"\n CALL {{\n MATCH {matcher}\n {optional_reset_self_contained}\n WITH {var_name} WHERE NOT ({var_name})--() DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n ElementType::Relationship(_) => {\n formatdoc! 
{\"\n CALL {{\n MATCH {matcher} WITH {var_name} DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n };\n let delete_query = neo4rs::query(&query_string);\n graph.run(delete_query).await?;\n Ok(())\n}\n\n/// Factory for Neo4j relationships\npub struct Factory {\n graph_pool: Arc,\n}\n\nimpl Factory {\n pub fn new() -> Self {\n Self {\n graph_pool: Arc::default(),\n }\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = (\n GraphElementDataSetupStatus,\n components::SetupStatus,\n );\n type Key = Neo4jGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Neo4j\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Neo4jGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n let data_coll_output = std::iter::zip(data_collections, analyzed_data_colls)\n .map(|(data_coll, analyzed)| {\n let setup_key = Neo4jGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState::new(\n &analyzed.schema,\n &data_coll.index_options,\n analyzed\n .dependent_node_labels()\n .into_iter()\n .map(|s| s.to_string())\n .collect(),\n )?;\n\n let conn_spec = context\n .auth_registry\n .get::(&data_coll.spec.connection)?;\n let factory = self.clone();\n let export_context = async move {\n Ok(Arc::new(ExportContext::new(\n factory.graph_pool.get_graph(&conn_spec).await?,\n 
data_coll.spec,\n analyzed,\n )?))\n }\n .boxed();\n\n Ok(TypedExportDataCollectionBuildOutput {\n export_context,\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>>()?;\n let decl_output = std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state =\n SetupState::new(&graph_elem_schema, &decl.decl.index_options, vec![])?;\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: Neo4jGraphElement,\n desired: Option,\n existing: CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result {\n let conn_spec = flow_instance_ctx\n .auth_registry\n .get::(&key.connection)?;\n let data_status = GraphElementDataSetupStatus::new(desired.as_ref(), &existing);\n let components = components::SetupStatus::create(\n SetupComponentOperator {\n graph_pool: self.graph_pool.clone(),\n conn_spec,\n },\n desired,\n existing,\n )?;\n Ok((data_status, components))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(desired.check_compatible(existing))\n }\n\n fn describe_resource(&self, key: &Neo4jGraphElement) -> Result {\n Ok(format!(\"Neo4j {}\", key.typ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut muts_by_graph = HashMap::new();\n for mut_with_ctx in mutations.iter() {\n muts_by_graph\n .entry(&mut_with_ctx.export_context.connection_ref)\n .or_insert_with(Vec::new)\n .push(mut_with_ctx);\n }\n let retry_options = retryable::RetryOptions::default();\n for muts in muts_by_graph.values_mut() {\n muts.sort_by_key(|m| m.export_context.create_order);\n let graph = &muts[0].export_context.graph;\n retryable::run(\n async || {\n let mut queries = vec![];\n for mut_with_ctx in muts.iter() {\n let export_ctx = 
&mut_with_ctx.export_context;\n for upsert in mut_with_ctx.mutation.upserts.iter() {\n export_ctx.add_upsert_queries(upsert, &mut queries)?;\n }\n }\n for mut_with_ctx in muts.iter().rev() {\n let export_ctx = &mut_with_ctx.export_context;\n for deletion in mut_with_ctx.mutation.deletes.iter() {\n export_ctx.add_delete_queries(&deletion.key, &mut queries)?;\n }\n }\n let mut txn = graph.start_txn().await?;\n txn.run_queries(queries).await?;\n txn.commit().await?;\n retryable::Ok(())\n },\n &retry_options,\n )\n .await\n .map_err(Into::::into)?\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Relationships first, then nodes, as relationships need to be deleted before nodes they referenced.\n let mut relationship_types = IndexSet::<&Neo4jGraphElement>::new();\n let mut node_labels = IndexSet::<&Neo4jGraphElement>::new();\n let mut dependent_node_labels = IndexSet::::new();\n\n let mut components = vec![];\n for change in changes.iter() {\n if let Some(data_clear) = &change.setup_status.0.data_clear {\n match &change.key.typ {\n ElementType::Relationship(_) => {\n relationship_types.insert(&change.key);\n for label in &data_clear.dependent_node_labels {\n dependent_node_labels.insert(Neo4jGraphElement {\n connection: change.key.connection.clone(),\n typ: ElementType::Node(label.clone()),\n });\n }\n }\n ElementType::Node(_) => {\n node_labels.insert(&change.key);\n }\n }\n }\n components.push(&change.setup_status.1);\n }\n\n // Relationships have no dependency, so can be cleared first.\n for rel_type in relationship_types.into_iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(rel_type, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, rel_type, true).await?;\n }\n // Clear standalone nodes, which is simpler than dependent nodes.\n for node_label in node_labels.iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n 
.await?;\n clear_graph_element_data(&graph, node_label, true).await?;\n }\n // Clear dependent nodes if they're not covered by standalone nodes.\n for node_label in dependent_node_labels.iter() {\n if !node_labels.contains(node_label) {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, node_label, false).await?;\n }\n }\n\n apply_component_changes(components, &()).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/split_recursively.rs", "use anyhow::anyhow;\nuse log::{error, trace};\nuse regex::{Matches, Regex};\nuse std::collections::HashSet;\nuse std::sync::LazyLock;\nuse std::{collections::HashMap, sync::Arc};\nuse unicase::UniCase;\n\nuse crate::base::field_attrs;\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Deserialize)]\nstruct CustomLanguageSpec {\n language_name: String,\n #[serde(default)]\n aliases: Vec,\n separators_regex: Vec,\n}\n\n#[derive(Deserialize)]\nstruct Spec {\n #[serde(default)]\n custom_languages: Vec,\n}\n\nconst SYNTAX_LEVEL_GAP_COST: usize = 512;\nconst MISSING_OVERLAP_COST: usize = 512;\nconst PER_LINE_BREAK_LEVEL_GAP_COST: usize = 64;\nconst TOO_SMALL_CHUNK_COST: usize = 1048576;\n\npub struct Args {\n text: ResolvedOpArg,\n chunk_size: ResolvedOpArg,\n min_chunk_size: Option,\n chunk_overlap: Option,\n language: Option,\n}\n\nstruct SimpleLanguageConfig {\n name: String,\n aliases: Vec,\n separator_regex: Vec,\n}\n\nstatic DEFAULT_LANGUAGE_CONFIG: LazyLock =\n LazyLock::new(|| SimpleLanguageConfig {\n name: \"_DEFAULT\".to_string(),\n aliases: vec![],\n separator_regex: [r\"\\n\\n+\", r\"\\n\", r\"\\s+\"]\n .into_iter()\n .map(|s| Regex::new(s).unwrap())\n .collect(),\n });\n\nstruct TreesitterLanguageConfig {\n name: String,\n tree_sitter_lang: tree_sitter::Language,\n terminal_node_kind_ids: HashSet,\n}\n\nfn add_treesitter_language<'a>(\n output: &'a mut HashMap, Arc>,\n name: 
&'static str,\n aliases: impl IntoIterator,\n lang_fn: impl Into,\n terminal_node_kinds: impl IntoIterator,\n) {\n let tree_sitter_lang: tree_sitter::Language = lang_fn.into();\n let terminal_node_kind_ids = terminal_node_kinds\n .into_iter()\n .filter_map(|kind| {\n let id = tree_sitter_lang.id_for_node_kind(kind, true);\n if id != 0 {\n trace!(\"Got id for node kind: `{kind}` -> {id}\");\n Some(id)\n } else {\n error!(\"Failed in getting id for node kind: `{kind}`\");\n None\n }\n })\n .collect();\n\n let config = Arc::new(TreesitterLanguageConfig {\n name: name.to_string(),\n tree_sitter_lang,\n terminal_node_kind_ids,\n });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nstatic TREE_SITTER_LANGUAGE_BY_LANG: LazyLock<\n HashMap, Arc>,\n> = LazyLock::new(|| {\n let mut map = HashMap::new();\n add_treesitter_language(&mut map, \"C\", [\".c\"], tree_sitter_c::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"C++\",\n [\".cpp\", \".cc\", \".cxx\", \".h\", \".hpp\", \"cpp\"],\n tree_sitter_c::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"C#\",\n [\".cs\", \"cs\", \"csharp\"],\n tree_sitter_c_sharp::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"CSS\",\n [\".css\", \".scss\"],\n tree_sitter_css::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Fortran\",\n [\".f\", \".f90\", \".f95\", \".f03\", \"f\", \"f90\", \"f95\", \"f03\"],\n tree_sitter_fortran::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Go\",\n [\".go\", \"golang\"],\n tree_sitter_go::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"HTML\",\n [\".html\", \".htm\"],\n tree_sitter_html::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"Java\", [\".java\"], tree_sitter_java::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"JavaScript\",\n [\".js\", \"js\"],\n 
tree_sitter_javascript::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"JSON\", [\".json\"], tree_sitter_json::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Kotlin\",\n [\".kt\", \".kts\"],\n tree_sitter_kotlin_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Markdown\",\n [\".md\", \".mdx\", \"md\"],\n tree_sitter_md::LANGUAGE,\n [\"inline\"],\n );\n add_treesitter_language(\n &mut map,\n \"Pascal\",\n [\".pas\", \"pas\", \".dpr\", \"dpr\", \"Delphi\"],\n tree_sitter_pascal::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"PHP\", [\".php\"], tree_sitter_php::LANGUAGE_PHP, []);\n add_treesitter_language(\n &mut map,\n \"Python\",\n [\".py\"],\n tree_sitter_python::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"R\", [\".r\"], tree_sitter_r::LANGUAGE, []);\n add_treesitter_language(&mut map, \"Ruby\", [\".rb\"], tree_sitter_ruby::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Rust\",\n [\".rs\", \"rs\"],\n tree_sitter_rust::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Scala\",\n [\".scala\"],\n tree_sitter_scala::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"SQL\", [\".sql\"], tree_sitter_sequel::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Swift\",\n [\".swift\"],\n tree_sitter_swift::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TOML\",\n [\".toml\"],\n tree_sitter_toml_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TSX\",\n [\".tsx\"],\n tree_sitter_typescript::LANGUAGE_TSX,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TypeScript\",\n [\".ts\", \"ts\"],\n tree_sitter_typescript::LANGUAGE_TYPESCRIPT,\n [],\n );\n add_treesitter_language(&mut map, \"XML\", [\".xml\"], tree_sitter_xml::LANGUAGE_XML, []);\n add_treesitter_language(&mut map, \"DTD\", [\".dtd\"], tree_sitter_xml::LANGUAGE_DTD, []);\n add_treesitter_language(\n &mut map,\n \"YAML\",\n [\".yaml\", \".yml\"],\n tree_sitter_yaml::LANGUAGE,\n 
[],\n );\n map\n});\n\nenum ChunkKind<'t> {\n TreeSitterNode {\n lang_config: &'t TreesitterLanguageConfig,\n node: tree_sitter::Node<'t>,\n },\n RegexpSepChunk {\n lang_config: &'t SimpleLanguageConfig,\n next_regexp_sep_id: usize,\n },\n}\n\nstruct Chunk<'t, 's: 't> {\n full_text: &'s str,\n range: RangeValue,\n kind: ChunkKind<'t>,\n}\n\nimpl<'t, 's: 't> Chunk<'t, 's> {\n fn text(&self) -> &'s str {\n self.range.extract_str(self.full_text)\n }\n}\n\nstruct TextChunksIter<'t, 's: 't> {\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n matches_iter: Matches<'t, 's>,\n regexp_sep_id: usize,\n next_start_pos: Option,\n}\n\nimpl<'t, 's: 't> TextChunksIter<'t, 's> {\n fn new(\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n regexp_sep_id: usize,\n ) -> Self {\n Self {\n lang_config,\n parent,\n matches_iter: lang_config.separator_regex[regexp_sep_id].find_iter(parent.text()),\n regexp_sep_id,\n next_start_pos: Some(parent.range.start),\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TextChunksIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let start_pos = self.next_start_pos?;\n let end_pos = match self.matches_iter.next() {\n Some(grp) => {\n self.next_start_pos = Some(self.parent.range.start + grp.end());\n self.parent.range.start + grp.start()\n }\n None => {\n self.next_start_pos = None;\n if start_pos >= self.parent.range.end {\n return None;\n }\n self.parent.range.end\n }\n };\n Some(Chunk {\n full_text: self.parent.full_text,\n range: RangeValue::new(start_pos, end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: self.lang_config,\n next_regexp_sep_id: self.regexp_sep_id + 1,\n },\n })\n }\n}\n\nstruct TreeSitterNodeIter<'t, 's: 't> {\n lang_config: &'t TreesitterLanguageConfig,\n full_text: &'s str,\n cursor: Option>,\n next_start_pos: usize,\n end_pos: usize,\n}\n\nimpl<'t, 's: 't> TreeSitterNodeIter<'t, 's> {\n fn fill_gap(\n next_start_pos: &mut usize,\n gap_end_pos: usize,\n 
full_text: &'s str,\n ) -> Option> {\n let start_pos = *next_start_pos;\n if start_pos < gap_end_pos {\n *next_start_pos = gap_end_pos;\n Some(Chunk {\n full_text,\n range: RangeValue::new(start_pos, gap_end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n })\n } else {\n None\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TreeSitterNodeIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let cursor = if let Some(cursor) = &mut self.cursor {\n cursor\n } else {\n return Self::fill_gap(&mut self.next_start_pos, self.end_pos, self.full_text);\n };\n let node = cursor.node();\n if let Some(gap) =\n Self::fill_gap(&mut self.next_start_pos, node.start_byte(), self.full_text)\n {\n return Some(gap);\n }\n if !cursor.goto_next_sibling() {\n self.cursor = None;\n }\n self.next_start_pos = node.end_byte();\n Some(Chunk {\n full_text: self.full_text,\n range: RangeValue::new(node.start_byte(), node.end_byte()),\n kind: ChunkKind::TreeSitterNode {\n lang_config: self.lang_config,\n node,\n },\n })\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]\nenum LineBreakLevel {\n Inline,\n Newline,\n DoubleNewline,\n}\n\nimpl LineBreakLevel {\n fn ord(self) -> usize {\n match self {\n LineBreakLevel::Inline => 0,\n LineBreakLevel::Newline => 1,\n LineBreakLevel::DoubleNewline => 2,\n }\n }\n}\n\nfn line_break_level(c: &str) -> LineBreakLevel {\n let mut lb_level = LineBreakLevel::Inline;\n let mut iter = c.chars();\n while let Some(c) = iter.next() {\n if c == '\\n' || c == '\\r' {\n lb_level = LineBreakLevel::Newline;\n for c2 in iter.by_ref() {\n if c2 == '\\n' || c2 == '\\r' {\n if c == c2 {\n return LineBreakLevel::DoubleNewline;\n }\n } else {\n break;\n }\n }\n }\n }\n lb_level\n}\n\nconst INLINE_SPACE_CHARS: [char; 2] = [' ', '\\t'];\n\nstruct AtomChunk {\n range: RangeValue,\n boundary_syntax_level: usize,\n\n internal_lb_level: LineBreakLevel,\n boundary_lb_level: 
LineBreakLevel,\n}\n\nstruct AtomChunksCollector<'s> {\n full_text: &'s str,\n\n curr_level: usize,\n min_level: usize,\n atom_chunks: Vec,\n}\nimpl<'s> AtomChunksCollector<'s> {\n fn collect(&mut self, range: RangeValue) {\n // Trim trailing whitespaces.\n let end_trimmed_text = &self.full_text[range.start..range.end].trim_end();\n if end_trimmed_text.is_empty() {\n return;\n }\n\n // Trim leading whitespaces.\n let trimmed_text = end_trimmed_text.trim_start();\n let new_start = range.start + (end_trimmed_text.len() - trimmed_text.len());\n let new_end = new_start + trimmed_text.len();\n\n // Align to beginning of the line if possible.\n let prev_end = self.atom_chunks.last().map_or(0, |chunk| chunk.range.end);\n let gap = &self.full_text[prev_end..new_start];\n let boundary_lb_level = line_break_level(gap);\n let range = if boundary_lb_level != LineBreakLevel::Inline {\n let trimmed_gap = gap.trim_end_matches(INLINE_SPACE_CHARS);\n RangeValue::new(prev_end + trimmed_gap.len(), new_end)\n } else {\n RangeValue::new(new_start, new_end)\n };\n\n self.atom_chunks.push(AtomChunk {\n range,\n boundary_syntax_level: self.min_level,\n internal_lb_level: line_break_level(trimmed_text),\n boundary_lb_level,\n });\n self.min_level = self.curr_level;\n }\n\n fn into_atom_chunks(mut self) -> Vec {\n self.atom_chunks.push(AtomChunk {\n range: RangeValue::new(self.full_text.len(), self.full_text.len()),\n boundary_syntax_level: self.min_level,\n internal_lb_level: LineBreakLevel::Inline,\n boundary_lb_level: LineBreakLevel::DoubleNewline,\n });\n self.atom_chunks\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct OutputPosition {\n char_offset: usize,\n line: u32,\n column: u32,\n}\n\nimpl OutputPosition {\n fn into_output(self) -> value::Value {\n value::Value::Struct(fields_value!(\n self.char_offset as i64,\n self.line as i64,\n self.column as i64\n ))\n }\n}\nstruct Position {\n byte_offset: usize,\n output: Option,\n}\n\nimpl Position {\n fn new(byte_offset: usize) 
-> Self {\n Self {\n byte_offset,\n output: None,\n }\n }\n}\n\nstruct ChunkOutput<'s> {\n start_pos: Position,\n end_pos: Position,\n text: &'s str,\n}\n\nstruct RecursiveChunker<'s> {\n full_text: &'s str,\n chunk_size: usize,\n chunk_overlap: usize,\n min_chunk_size: usize,\n}\n\nimpl<'t, 's: 't> RecursiveChunker<'s> {\n fn collect_atom_chunks_from_iter(\n &self,\n sub_chunks_iter: impl Iterator>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n atom_collector.curr_level += 1;\n for sub_chunk in sub_chunks_iter {\n let range = sub_chunk.range;\n if range.len() <= self.min_chunk_size {\n atom_collector.collect(range);\n } else {\n self.collect_atom_chunks(sub_chunk, atom_collector)?;\n }\n }\n atom_collector.curr_level -= 1;\n if atom_collector.curr_level < atom_collector.min_level {\n atom_collector.min_level = atom_collector.curr_level;\n }\n Ok(())\n }\n\n fn collect_atom_chunks(\n &self,\n chunk: Chunk<'t, 's>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n match chunk.kind {\n ChunkKind::TreeSitterNode { lang_config, node } => {\n if !lang_config.terminal_node_kind_ids.contains(&node.kind_id()) {\n let mut cursor = node.walk();\n if cursor.goto_first_child() {\n return self.collect_atom_chunks_from_iter(\n TreeSitterNodeIter {\n lang_config,\n full_text: self.full_text,\n cursor: Some(cursor),\n next_start_pos: node.start_byte(),\n end_pos: node.end_byte(),\n },\n atom_collector,\n );\n }\n }\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: chunk.range,\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n },\n atom_collector,\n )\n }\n ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id,\n } => {\n if next_regexp_sep_id >= lang_config.separator_regex.len() {\n atom_collector.collect(chunk.range);\n Ok(())\n } else {\n self.collect_atom_chunks_from_iter(\n TextChunksIter::new(lang_config, &chunk, next_regexp_sep_id),\n 
atom_collector,\n )\n }\n }\n }\n }\n\n fn get_overlap_cost_base(&self, offset: usize) -> usize {\n if self.chunk_overlap == 0 {\n 0\n } else {\n (self.full_text.len() - offset) * MISSING_OVERLAP_COST / self.chunk_overlap\n }\n }\n\n fn merge_atom_chunks(&self, atom_chunks: Vec) -> Vec> {\n struct AtomRoutingPlan {\n start_idx: usize, // index of `atom_chunks` for the start chunk\n prev_plan_idx: usize, // index of `plans` for the previous plan\n cost: usize,\n overlap_cost_base: usize,\n }\n type PrevPlanCandidate = (std::cmp::Reverse, usize); // (cost, start_idx)\n\n let mut plans = Vec::with_capacity(atom_chunks.len());\n // Janitor\n plans.push(AtomRoutingPlan {\n start_idx: 0,\n prev_plan_idx: 0,\n cost: 0,\n overlap_cost_base: self.get_overlap_cost_base(0),\n });\n let mut prev_plan_candidates = std::collections::BinaryHeap::::new();\n\n let mut gap_cost_cache = vec![0];\n let mut syntax_level_gap_cost = |boundary: usize, internal: usize| -> usize {\n if boundary > internal {\n let gap = boundary - internal;\n for i in gap_cost_cache.len()..=gap {\n gap_cost_cache.push(gap_cost_cache[i - 1] + SYNTAX_LEVEL_GAP_COST / i);\n }\n gap_cost_cache[gap]\n } else {\n 0\n }\n };\n\n for (i, chunk) in atom_chunks[0..atom_chunks.len() - 1].iter().enumerate() {\n let mut min_cost = usize::MAX;\n let mut arg_min_start_idx: usize = 0;\n let mut arg_min_prev_plan_idx: usize = 0;\n let mut start_idx = i;\n\n let end_syntax_level = atom_chunks[i + 1].boundary_syntax_level;\n let end_lb_level = atom_chunks[i + 1].boundary_lb_level;\n\n let mut internal_syntax_level = usize::MAX;\n let mut internal_lb_level = LineBreakLevel::Inline;\n\n fn lb_level_gap(boundary: LineBreakLevel, internal: LineBreakLevel) -> usize {\n if boundary.ord() < internal.ord() {\n internal.ord() - boundary.ord()\n } else {\n 0\n }\n }\n loop {\n let start_chunk = &atom_chunks[start_idx];\n let chunk_size = chunk.range.end - start_chunk.range.start;\n\n let mut cost = 0;\n cost +=\n 
syntax_level_gap_cost(start_chunk.boundary_syntax_level, internal_syntax_level);\n cost += syntax_level_gap_cost(end_syntax_level, internal_syntax_level);\n cost += (lb_level_gap(start_chunk.boundary_lb_level, internal_lb_level)\n + lb_level_gap(end_lb_level, internal_lb_level))\n * PER_LINE_BREAK_LEVEL_GAP_COST;\n if chunk_size < self.min_chunk_size {\n cost += TOO_SMALL_CHUNK_COST;\n }\n\n if chunk_size > self.chunk_size {\n if min_cost == usize::MAX {\n min_cost = cost + plans[start_idx].cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = start_idx;\n }\n break;\n }\n\n let prev_plan_idx = if self.chunk_overlap > 0 {\n while let Some(top_prev_plan) = prev_plan_candidates.peek() {\n let overlap_size =\n atom_chunks[top_prev_plan.1].range.end - start_chunk.range.start;\n if overlap_size <= self.chunk_overlap {\n break;\n }\n prev_plan_candidates.pop();\n }\n prev_plan_candidates.push((\n std::cmp::Reverse(\n plans[start_idx].cost + plans[start_idx].overlap_cost_base,\n ),\n start_idx,\n ));\n prev_plan_candidates.peek().unwrap().1\n } else {\n start_idx\n };\n let prev_plan = &plans[prev_plan_idx];\n cost += prev_plan.cost;\n if self.chunk_overlap == 0 {\n cost += MISSING_OVERLAP_COST / 2;\n } else {\n let start_cost_base = self.get_overlap_cost_base(start_chunk.range.start);\n cost += if prev_plan.overlap_cost_base < start_cost_base {\n MISSING_OVERLAP_COST + prev_plan.overlap_cost_base - start_cost_base\n } else {\n MISSING_OVERLAP_COST\n };\n }\n if cost < min_cost {\n min_cost = cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = prev_plan_idx;\n }\n\n if start_idx == 0 {\n break;\n }\n\n start_idx -= 1;\n internal_syntax_level =\n internal_syntax_level.min(start_chunk.boundary_syntax_level);\n internal_lb_level = internal_lb_level.max(start_chunk.internal_lb_level);\n }\n plans.push(AtomRoutingPlan {\n start_idx: arg_min_start_idx,\n prev_plan_idx: arg_min_prev_plan_idx,\n cost: min_cost,\n overlap_cost_base: 
self.get_overlap_cost_base(chunk.range.end),\n });\n prev_plan_candidates.clear();\n }\n\n let mut output = Vec::new();\n let mut plan_idx = plans.len() - 1;\n while plan_idx > 0 {\n let plan = &plans[plan_idx];\n let start_chunk = &atom_chunks[plan.start_idx];\n let end_chunk = &atom_chunks[plan_idx - 1];\n output.push(ChunkOutput {\n start_pos: Position::new(start_chunk.range.start),\n end_pos: Position::new(end_chunk.range.end),\n text: &self.full_text[start_chunk.range.start..end_chunk.range.end],\n });\n plan_idx = plan.prev_plan_idx;\n }\n output.reverse();\n output\n }\n\n fn split_root_chunk(&self, kind: ChunkKind<'t>) -> Result>> {\n let mut atom_collector = AtomChunksCollector {\n full_text: self.full_text,\n min_level: 0,\n curr_level: 0,\n atom_chunks: Vec::new(),\n };\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: RangeValue::new(0, self.full_text.len()),\n kind,\n },\n &mut atom_collector,\n )?;\n let atom_chunks = atom_collector.into_atom_chunks();\n let output = self.merge_atom_chunks(atom_chunks);\n Ok(output)\n }\n}\n\nstruct Executor {\n args: Args,\n custom_languages: HashMap, Arc>,\n}\n\nimpl Executor {\n fn new(args: Args, spec: Spec) -> Result {\n let mut custom_languages = HashMap::new();\n for lang in spec.custom_languages {\n let separator_regex = lang\n .separators_regex\n .iter()\n .map(|s| Regex::new(s))\n .collect::>()\n .with_context(|| {\n format!(\n \"failed in parsing regexp for language `{}`\",\n lang.language_name\n )\n })?;\n let language_config = Arc::new(SimpleLanguageConfig {\n name: lang.language_name,\n aliases: lang.aliases,\n separator_regex,\n });\n if custom_languages\n .insert(\n UniCase::new(language_config.name.clone()),\n language_config.clone(),\n )\n .is_some()\n {\n api_bail!(\n \"duplicate language name / alias: `{}`\",\n language_config.name\n );\n }\n for alias in &language_config.aliases {\n if custom_languages\n .insert(UniCase::new(alias.clone()), language_config.clone())\n 
.is_some()\n {\n api_bail!(\"duplicate language name / alias: `{}`\", alias);\n }\n }\n }\n Ok(Self {\n args,\n custom_languages,\n })\n }\n}\n\nfn set_output_positions<'a>(text: &str, positions: impl Iterator) {\n let mut positions = positions.collect::>();\n positions.sort_by_key(|o| o.byte_offset);\n\n let mut positions_iter = positions.iter_mut();\n let Some(mut next_position) = positions_iter.next() else {\n return;\n };\n\n let mut char_offset = 0;\n let mut line = 1;\n let mut column = 1;\n for (byte_offset, ch) in text.char_indices() {\n while next_position.byte_offset == byte_offset {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n char_offset += 1;\n if ch == '\\n' {\n line += 1;\n column = 1;\n } else {\n column += 1;\n }\n }\n\n // Offsets after the last char.\n loop {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let full_text = self.args.text.value(&input)?.as_str()?;\n let chunk_size = self.args.chunk_size.value(&input)?.as_int64()?;\n let recursive_chunker = RecursiveChunker {\n full_text,\n chunk_size: chunk_size as usize,\n chunk_overlap: (self.args.chunk_overlap.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(0) as usize,\n min_chunk_size: (self.args.min_chunk_size.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(chunk_size / 2) as usize,\n };\n\n let language = UniCase::new(\n (if let Some(language) = self.args.language.value(&input)?.optional() {\n language.as_str()?\n } else {\n \"\"\n })\n .to_string(),\n );\n let mut output = if let Some(lang_config) = 
self.custom_languages.get(&language) {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id: 0,\n })?\n } else if let Some(lang_config) = TREE_SITTER_LANGUAGE_BY_LANG.get(&language) {\n let mut parser = tree_sitter::Parser::new();\n parser.set_language(&lang_config.tree_sitter_lang)?;\n let tree = parser.parse(full_text.as_ref(), None).ok_or_else(|| {\n anyhow!(\"failed in parsing text in language: {}\", lang_config.name)\n })?;\n recursive_chunker.split_root_chunk(ChunkKind::TreeSitterNode {\n lang_config,\n node: tree.root_node(),\n })?\n } else {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n })?\n };\n\n set_output_positions(\n full_text,\n output.iter_mut().flat_map(|chunk_output| {\n std::iter::once(&mut chunk_output.start_pos)\n .chain(std::iter::once(&mut chunk_output.end_pos))\n }),\n );\n\n let table = output\n .into_iter()\n .map(|chunk_output| {\n let output_start = chunk_output.start_pos.output.unwrap();\n let output_end = chunk_output.end_pos.output.unwrap();\n (\n RangeValue::new(output_start.char_offset, output_end.char_offset).into(),\n fields_value!(\n Arc::::from(chunk_output.text),\n output_start.into_output(),\n output_end.into_output()\n )\n .into(),\n )\n })\n .collect();\n\n Ok(Value::KTable(table))\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"SplitRecursively\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n chunk_size: args_resolver\n .next_arg(\"chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n 
min_chunk_size: args_resolver\n .next_optional_arg(\"min_chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n chunk_overlap: args_resolver\n .next_optional_arg(\"chunk_overlap\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let pos_struct = schema::ValueType::Struct(schema::StructSchema {\n fields: Arc::new(vec![\n schema::FieldSchema::new(\"offset\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"line\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"column\", make_output_type(BasicValueType::Int64)),\n ]),\n description: None,\n });\n\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"location\",\n make_output_type(BasicValueType::Range),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"text\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"start\",\n schema::EnrichedValueType {\n typ: pos_struct.clone(),\n nullable: false,\n attrs: Default::default(),\n },\n ));\n schema_builder.add_field(FieldSchema::new(\n \"end\",\n schema::EnrichedValueType {\n typ: pos_struct,\n nullable: false,\n attrs: Default::default(),\n },\n ));\n let output_schema = make_output_type(TableSchema::new(TableKind::KTable, struct_schema))\n .with_attr(\n field_attrs::CHUNK_BASE_TEXT,\n serde_json::to_value(args_resolver.get_analyze_value(&args.text))?,\n );\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(args, spec)?))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use 
crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n // Helper function to assert chunk text and its consistency with the range within the original text.\n fn assert_chunk_text_consistency(\n full_text: &str, // Added full text\n actual_chunk: &ChunkOutput<'_>,\n expected_text: &str,\n context: &str,\n ) {\n // Extract text using the chunk's range from the original full text.\n let extracted_text = full_text\n .get(actual_chunk.start_pos.byte_offset..actual_chunk.end_pos.byte_offset)\n .unwrap();\n // Assert that the expected text matches the text provided in the chunk.\n assert_eq!(\n actual_chunk.text, expected_text,\n \"Provided chunk text mismatch - {context}\"\n );\n // Assert that the expected text also matches the text extracted using the chunk's range.\n assert_eq!(\n extracted_text, expected_text,\n \"Range inconsistency: extracted text mismatch - {context}\"\n );\n }\n\n // Creates a default RecursiveChunker for testing, assuming no language-specific parsing.\n fn create_test_chunker(\n text: &str,\n chunk_size: usize,\n min_chunk_size: usize,\n chunk_overlap: usize,\n ) -> RecursiveChunker {\n RecursiveChunker {\n full_text: text,\n chunk_size,\n chunk_overlap,\n min_chunk_size,\n }\n }\n\n #[tokio::test]\n async fn test_split_recursively() {\n let spec = Spec {\n custom_languages: vec![],\n };\n let factory = Arc::new(Factory);\n let text_content = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n\n let input_args_values = vec![\n text_content.to_string().into(),\n (15i64).into(),\n (5i64).into(),\n (0i64).into(),\n Value::Null,\n ];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"min_chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"chunk_overlap\", BasicValueType::Int64),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, 
input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::KTable(table) => {\n let expected_chunks = vec![\n (RangeValue::new(0, 8), \"Linea 1.\"),\n (RangeValue::new(9, 17), \"Linea 2.\"),\n (RangeValue::new(19, 27), \"Linea 3.\"),\n ];\n\n for (range, expected_text) in expected_chunks {\n let key: KeyValue = range.into();\n match table.get(&key) {\n Some(scope_value_ref) => {\n let chunk_text =\n scope_value_ref.0.fields[0].as_str().unwrap_or_else(|_| {\n panic!(\"Chunk text not a string for key {key:?}\")\n });\n assert_eq!(**chunk_text, *expected_text);\n }\n None => panic!(\"Expected row value for key {key:?}, not found\"),\n }\n }\n }\n other => panic!(\"Expected Value::KTable, got {other:?}\"),\n }\n }\n\n #[test]\n fn test_translate_bytes_to_chars_simple() {\n let text = \"abc😄def\";\n let mut start1 = Position::new(0);\n let mut end1 = Position::new(3);\n let mut start2 = Position::new(3);\n let mut end2 = Position::new(7);\n let mut start3 = Position::new(7);\n let mut end3 = Position::new(10);\n let mut end_full = Position::new(text.len());\n\n let offsets = vec![\n &mut start1,\n &mut end1,\n &mut start2,\n &mut end2,\n &mut start3,\n &mut end3,\n &mut end_full,\n ];\n\n set_output_positions(text, offsets.into_iter());\n\n assert_eq!(\n start1.output,\n Some(OutputPosition {\n char_offset: 0,\n line: 1,\n column: 1,\n })\n );\n assert_eq!(\n end1.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n start2.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n end2.output,\n Some(OutputPosition {\n char_offset: 4,\n line: 1,\n column: 5,\n })\n );\n assert_eq!(\n end3.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n assert_eq!(\n end_full.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n 
})\n );\n }\n\n #[test]\n fn test_basic_split_no_overlap() {\n let text = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n let chunker = create_test_chunker(text, 15, 5, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n assert_chunk_text_consistency(text, &chunks[0], \"Linea 1.\", \"Test 1, Chunk 0\");\n assert_chunk_text_consistency(text, &chunks[1], \"Linea 2.\", \"Test 1, Chunk 1\");\n assert_chunk_text_consistency(text, &chunks[2], \"Linea 3.\", \"Test 1, Chunk 2\");\n\n // Test splitting when chunk_size forces breaks within segments.\n let text2 = \"A very very long text that needs to be split.\";\n let chunker2 = create_test_chunker(text2, 20, 12, 0);\n let result2 = chunker2.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result2.is_ok());\n let chunks2 = result2.unwrap();\n\n // Expect multiple chunks, likely split by spaces due to chunk_size.\n assert!(chunks2.len() > 1);\n assert_chunk_text_consistency(text2, &chunks2[0], \"A very very long\", \"Test 2, Chunk 0\");\n assert!(chunks2[0].text.len() <= 20);\n }\n\n #[test]\n fn test_basic_split_with_overlap() {\n let text = \"This is a test text that is a bit longer to see how the overlap works.\";\n let chunker = create_test_chunker(text, 20, 10, 5);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert!(chunks.len() > 1);\n\n if chunks.len() >= 2 {\n assert!(chunks[0].text.len() <= 25);\n }\n }\n\n #[test]\n fn test_split_trims_whitespace() {\n let text = \" \\n First chunk. \\n\\n Second chunk with spaces at the end. 
\\n\";\n let chunker = create_test_chunker(text, 30, 10, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n\n assert_chunk_text_consistency(\n text,\n &chunks[0],\n \" First chunk.\",\n \"Whitespace Test, Chunk 0\",\n );\n assert_chunk_text_consistency(\n text,\n &chunks[1],\n \" Second chunk with spaces\",\n \"Whitespace Test, Chunk 1\",\n );\n assert_chunk_text_consistency(text, &chunks[2], \"at the end.\", \"Whitespace Test, Chunk 2\");\n }\n}\n"], ["/cocoindex/src/llm/vllm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_vllm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:8000/v1\".to_string());\n let api_key = std::env::var(\"VLLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/ops/targets/postgres.rs", "use crate::prelude::*;\n\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::base::spec::{self, *};\nuse crate::ops::sdk::*;\nuse crate::settings::DatabaseConnectionSpec;\nuse async_trait::async_trait;\nuse indexmap::{IndexMap, IndexSet};\nuse itertools::Itertools;\nuse serde::Serialize;\nuse sqlx::PgPool;\nuse sqlx::postgres::types::PgRange;\nuse std::ops::Bound;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n database: Option>,\n table_name: Option,\n}\nconst BIND_LIMIT: usize = 65535;\n\nfn key_value_fields_iter<'a>(\n key_fields_schema: &[FieldSchema],\n key_value: &'a KeyValue,\n) -> Result<&'a [KeyValue]> {\n let 
slice = if key_fields_schema.len() == 1 {\n std::slice::from_ref(key_value)\n } else {\n match key_value {\n KeyValue::Struct(fields) => fields,\n _ => bail!(\"expect struct key value\"),\n }\n };\n Ok(slice)\n}\n\nfn convertible_to_pgvector(vec_schema: &VectorTypeSchema) -> bool {\n if vec_schema.dimension.is_some() {\n matches!(\n *vec_schema.element_type,\n BasicValueType::Float32 | BasicValueType::Float64 | BasicValueType::Int64\n )\n } else {\n false\n }\n}\n\nfn bind_key_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n key_value: &'arg KeyValue,\n) -> Result<()> {\n match key_value {\n KeyValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Str(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Bool(v) => {\n builder.push_bind(v);\n }\n KeyValue::Int64(v) => {\n builder.push_bind(v);\n }\n KeyValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n KeyValue::Uuid(v) => {\n builder.push_bind(v);\n }\n KeyValue::Date(v) => {\n builder.push_bind(v);\n }\n KeyValue::Struct(fields) => {\n builder.push_bind(sqlx::types::Json(fields));\n }\n }\n Ok(())\n}\n\nfn bind_value_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n field_schema: &'arg FieldSchema,\n value: &'arg Value,\n) -> Result<()> {\n match &value {\n Value::Basic(v) => match v {\n BasicValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Str(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Bool(v) => {\n builder.push_bind(v);\n }\n BasicValue::Int64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float32(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n BasicValue::Uuid(v) => {\n builder.push_bind(v);\n }\n BasicValue::Date(v) => {\n 
builder.push_bind(v);\n }\n BasicValue::Time(v) => {\n builder.push_bind(v);\n }\n BasicValue::LocalDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::OffsetDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::TimeDelta(v) => {\n builder.push_bind(v);\n }\n BasicValue::Json(v) => {\n builder.push_bind(sqlx::types::Json(&**v));\n }\n BasicValue::Vector(v) => match &field_schema.value_type.typ {\n ValueType::Basic(BasicValueType::Vector(vs)) if convertible_to_pgvector(vs) => {\n let vec = v\n .iter()\n .map(|v| {\n Ok(match v {\n BasicValue::Float32(v) => *v,\n BasicValue::Float64(v) => *v as f32,\n BasicValue::Int64(v) => *v as f32,\n v => bail!(\"unexpected vector element type: {}\", v.kind()),\n })\n })\n .collect::>>()?;\n builder.push_bind(pgvector::Vector::from(vec));\n }\n _ => {\n builder.push_bind(sqlx::types::Json(v));\n }\n },\n BasicValue::UnionVariant { .. } => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v: value,\n }));\n }\n },\n Value::Null => {\n builder.push(\"NULL\");\n }\n v => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v,\n }));\n }\n };\n Ok(())\n}\n\npub struct ExportContext {\n db_ref: Option>,\n db_pool: PgPool,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n upsert_sql_prefix: String,\n upsert_sql_suffix: String,\n delete_sql_prefix: String,\n}\n\nimpl ExportContext {\n fn new(\n db_ref: Option>,\n db_pool: PgPool,\n table_name: String,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n ) -> Result {\n let key_fields = key_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let all_fields = (key_fields_schema.iter().chain(value_fields_schema.iter()))\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let set_value_fields = value_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\" = EXCLUDED.\\\"{}\\\"\", f.name, f.name))\n .collect::>()\n 
.join(\", \");\n\n Ok(Self {\n db_ref,\n db_pool,\n upsert_sql_prefix: format!(\"INSERT INTO {table_name} ({all_fields}) VALUES \"),\n upsert_sql_suffix: if value_fields_schema.is_empty() {\n format!(\" ON CONFLICT ({key_fields}) DO NOTHING;\")\n } else {\n format!(\" ON CONFLICT ({key_fields}) DO UPDATE SET {set_value_fields};\")\n },\n delete_sql_prefix: format!(\"DELETE FROM {table_name} WHERE \"),\n key_fields_schema,\n value_fields_schema,\n })\n }\n}\n\nimpl ExportContext {\n async fn upsert(\n &self,\n upserts: &[interface::ExportTargetUpsertEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n let num_parameters = self.key_fields_schema.len() + self.value_fields_schema.len();\n for upsert_chunk in upserts.chunks(BIND_LIMIT / num_parameters) {\n let mut query_builder = sqlx::QueryBuilder::new(&self.upsert_sql_prefix);\n for (i, upsert) in upsert_chunk.iter().enumerate() {\n if i > 0 {\n query_builder.push(\",\");\n }\n query_builder.push(\" (\");\n for (j, key_value) in key_value_fields_iter(&self.key_fields_schema, &upsert.key)?\n .iter()\n .enumerate()\n {\n if j > 0 {\n query_builder.push(\", \");\n }\n bind_key_field(&mut query_builder, key_value)?;\n }\n if self.value_fields_schema.len() != upsert.value.fields.len() {\n bail!(\n \"unmatched value length: {} vs {}\",\n self.value_fields_schema.len(),\n upsert.value.fields.len()\n );\n }\n for (schema, value) in self\n .value_fields_schema\n .iter()\n .zip(upsert.value.fields.iter())\n {\n query_builder.push(\", \");\n bind_value_field(&mut query_builder, schema, value)?;\n }\n query_builder.push(\")\");\n }\n query_builder.push(&self.upsert_sql_suffix);\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n\n async fn delete(\n &self,\n deletions: &[interface::ExportTargetDeleteEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n // TODO: Find a way to batch delete.\n for deletion in deletions.iter() {\n let mut query_builder = sqlx::QueryBuilder::new(\"\");\n 
query_builder.push(&self.delete_sql_prefix);\n for (i, (schema, value)) in self\n .key_fields_schema\n .iter()\n .zip(key_value_fields_iter(&self.key_fields_schema, &deletion.key)?.iter())\n .enumerate()\n {\n if i > 0 {\n query_builder.push(\" AND \");\n }\n query_builder.push(\"\\\"\");\n query_builder.push(schema.name.as_str());\n query_builder.push(\"\\\"\");\n query_builder.push(\"=\");\n bind_key_field(&mut query_builder, value)?;\n }\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n}\n\n#[derive(Default)]\npub struct Factory {}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TableId {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n database: Option>,\n table_name: String,\n}\n\nimpl std::fmt::Display for TableId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.table_name)?;\n if let Some(database) = &self.database {\n write!(f, \" (database: {database})\")?;\n }\n Ok(())\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SetupState {\n #[serde(flatten)]\n columns: TableColumnsSchema,\n\n vector_indexes: BTreeMap,\n}\n\nimpl SetupState {\n fn new(\n table_id: &TableId,\n key_fields_schema: &[FieldSchema],\n value_fields_schema: &[FieldSchema],\n index_options: &IndexOptions,\n ) -> Self {\n Self {\n columns: TableColumnsSchema {\n key_columns: key_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n value_columns: value_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n },\n vector_indexes: index_options\n .vector_indexes\n .iter()\n .map(|v| (to_vector_index_name(&table_id.table_name, v), v.clone()))\n .collect(),\n }\n }\n\n fn uses_pgvector(&self) -> bool {\n self.columns\n .value_columns\n .iter()\n .any(|(_, value)| match &value {\n ValueType::Basic(BasicValueType::Vector(vec_schema)) => {\n 
convertible_to_pgvector(vec_schema)\n }\n _ => false,\n })\n }\n}\n\nfn to_column_type_sql(column_type: &ValueType) -> String {\n match column_type {\n ValueType::Basic(basic_type) => match basic_type {\n BasicValueType::Bytes => \"bytea\".into(),\n BasicValueType::Str => \"text\".into(),\n BasicValueType::Bool => \"boolean\".into(),\n BasicValueType::Int64 => \"bigint\".into(),\n BasicValueType::Float32 => \"real\".into(),\n BasicValueType::Float64 => \"double precision\".into(),\n BasicValueType::Range => \"int8range\".into(),\n BasicValueType::Uuid => \"uuid\".into(),\n BasicValueType::Date => \"date\".into(),\n BasicValueType::Time => \"time\".into(),\n BasicValueType::LocalDateTime => \"timestamp\".into(),\n BasicValueType::OffsetDateTime => \"timestamp with time zone\".into(),\n BasicValueType::TimeDelta => \"interval\".into(),\n BasicValueType::Json => \"jsonb\".into(),\n BasicValueType::Vector(vec_schema) => {\n if convertible_to_pgvector(vec_schema) {\n format!(\"vector({})\", vec_schema.dimension.unwrap_or(0))\n } else {\n \"jsonb\".into()\n }\n }\n BasicValueType::Union(_) => \"jsonb\".into(),\n },\n _ => \"jsonb\".into(),\n }\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Owned(TableColumnsSchema {\n key_columns: val\n .columns\n .key_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n value_columns: val\n .columns\n .value_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n })\n }\n}\n\n#[derive(Debug)]\npub struct TableSetupAction {\n table_action: TableMainSetupAction,\n indexes_to_delete: IndexSet,\n indexes_to_create: IndexMap,\n}\n\n#[derive(Debug)]\npub struct SetupStatus {\n create_pgvector_extension: bool,\n actions: TableSetupAction,\n vector_as_jsonb_columns: Vec<(String, ValueType)>,\n}\n\nimpl SetupStatus {\n fn new(desired_state: Option, existing: setup::CombinedState) -> Self {\n let table_action =\n 
TableMainSetupAction::from_states(desired_state.as_ref(), &existing, false);\n let vector_as_jsonb_columns = desired_state\n .as_ref()\n .iter()\n .flat_map(|s| {\n s.columns.value_columns.iter().filter_map(|(name, schema)| {\n if let ValueType::Basic(BasicValueType::Vector(vec_schema)) = schema\n && !convertible_to_pgvector(vec_schema)\n {\n let is_touched = match &table_action.table_upsertion {\n Some(TableUpsertionAction::Create { values, .. }) => {\n values.contains_key(name)\n }\n Some(TableUpsertionAction::Update {\n columns_to_upsert, ..\n }) => columns_to_upsert.contains_key(name),\n None => false,\n };\n if is_touched {\n Some((name.clone(), schema.clone()))\n } else {\n None\n }\n } else {\n None\n }\n })\n })\n .collect::>();\n let (indexes_to_delete, indexes_to_create) = desired_state\n .as_ref()\n .map(|desired| {\n (\n existing\n .possible_versions()\n .flat_map(|v| v.vector_indexes.keys())\n .filter(|index_name| !desired.vector_indexes.contains_key(*index_name))\n .cloned()\n .collect::>(),\n desired\n .vector_indexes\n .iter()\n .filter(|(name, def)| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vector_indexes.get(*name) != Some(def))\n })\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect::>(),\n )\n })\n .unwrap_or_default();\n let create_pgvector_extension = desired_state\n .as_ref()\n .map(|s| s.uses_pgvector())\n .unwrap_or(false)\n && !existing.current.map(|s| s.uses_pgvector()).unwrap_or(false);\n\n Self {\n create_pgvector_extension,\n actions: TableSetupAction {\n table_action,\n indexes_to_delete,\n indexes_to_create,\n },\n vector_as_jsonb_columns,\n }\n }\n}\n\nfn to_vector_similarity_metric_sql(metric: VectorSimilarityMetric) -> &'static str {\n match metric {\n VectorSimilarityMetric::CosineSimilarity => \"vector_cosine_ops\",\n VectorSimilarityMetric::L2Distance => \"vector_l2_ops\",\n VectorSimilarityMetric::InnerProduct => \"vector_ip_ops\",\n }\n}\n\nfn to_index_spec_sql(index_spec: 
&VectorIndexDef) -> Cow<'static, str> {\n format!(\n \"USING hnsw ({} {})\",\n index_spec.field_name,\n to_vector_similarity_metric_sql(index_spec.metric)\n )\n .into()\n}\n\nfn to_vector_index_name(table_name: &str, vector_index_def: &spec::VectorIndexDef) -> String {\n format!(\n \"{}__{}__{}\",\n table_name,\n vector_index_def.field_name,\n to_vector_similarity_metric_sql(vector_index_def.metric)\n )\n}\n\nfn describe_index_spec(index_name: &str, index_spec: &VectorIndexDef) -> String {\n format!(\"{} {}\", index_name, to_index_spec_sql(index_spec))\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut descriptions = self.actions.table_action.describe_changes();\n for (column_name, schema) in self.vector_as_jsonb_columns.iter() {\n descriptions.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by pgvector. It will be stored as `jsonb`.\",\n column_name,\n schema\n )));\n }\n if self.create_pgvector_extension {\n descriptions.push(setup::ChangeDescription::Action(\n \"Create pg_vector extension (if not exists)\".to_string(),\n ));\n }\n if !self.actions.indexes_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete indexes from table: {}\",\n self.actions.indexes_to_delete.iter().join(\", \"),\n )));\n }\n if !self.actions.indexes_to_create.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create indexes in table: {}\",\n self.actions\n .indexes_to_create\n .iter()\n .map(|(index_name, index_spec)| describe_index_spec(index_name, index_spec))\n .join(\", \"),\n )));\n }\n descriptions\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n let has_other_update = !self.actions.indexes_to_create.is_empty()\n || !self.actions.indexes_to_delete.is_empty();\n self.actions.table_action.change_type(has_other_update)\n }\n}\n\nimpl SetupStatus {\n async fn apply_change(&self, 
db_pool: &PgPool, table_name: &str) -> Result<()> {\n if self.actions.table_action.drop_existing {\n sqlx::query(&format!(\"DROP TABLE IF EXISTS {table_name}\"))\n .execute(db_pool)\n .await?;\n }\n if self.create_pgvector_extension {\n sqlx::query(\"CREATE EXTENSION IF NOT EXISTS vector;\")\n .execute(db_pool)\n .await?;\n }\n for index_name in self.actions.indexes_to_delete.iter() {\n let sql = format!(\"DROP INDEX IF EXISTS {index_name}\");\n sqlx::query(&sql).execute(db_pool).await?;\n }\n if let Some(table_upsertion) = &self.actions.table_action.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n let mut fields = (keys\n .iter()\n .map(|(name, typ)| format!(\"\\\"{name}\\\" {typ} NOT NULL\")))\n .chain(values.iter().map(|(name, typ)| format!(\"\\\"{name}\\\" {typ}\")));\n let sql = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} ({}, PRIMARY KEY ({}))\",\n fields.join(\", \"),\n keys.keys().join(\", \")\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n for column_name in columns_to_delete.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\"\",\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n for (column_name, column_type) in columns_to_upsert.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\", ADD COLUMN \\\"{column_name}\\\" {column_type}\"\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n }\n }\n }\n for (index_name, index_spec) in self.actions.indexes_to_create.iter() {\n let sql = format!(\n \"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} {}\",\n to_index_spec_sql(index_spec)\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n Ok(())\n }\n}\n\nasync fn get_db_pool(\n db_ref: Option<&spec::AuthEntryReference>,\n auth_registry: &AuthRegistry,\n) -> Result {\n let lib_context = 
get_lib_context()?;\n let db_conn_spec = db_ref\n .as_ref()\n .map(|db_ref| auth_registry.get(db_ref))\n .transpose()?;\n let db_pool = match db_conn_spec {\n Some(db_conn_spec) => lib_context.db_pools.get_pool(&db_conn_spec).await?,\n None => lib_context.require_builtin_db_pool()?.clone(),\n };\n Ok(db_pool)\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = TableId;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Postgres\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(TableId, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n let table_id = TableId {\n database: d.spec.database.clone(),\n table_name: d.spec.table_name.unwrap_or_else(|| {\n utils::db::sanitize_identifier(&format!(\n \"{}__{}\",\n context.flow_instance_name, d.name\n ))\n }),\n };\n let setup_state = SetupState::new(\n &table_id,\n &d.key_fields_schema,\n &d.value_fields_schema,\n &d.index_options,\n );\n let table_name = table_id.table_name.clone();\n let db_ref = d.spec.database;\n let auth_registry = context.auth_registry.clone();\n let export_context = Box::pin(async move {\n let db_pool = get_db_pool(db_ref.as_ref(), &auth_registry).await?;\n let export_context = Arc::new(ExportContext::new(\n db_ref,\n db_pool.clone(),\n table_name,\n d.key_fields_schema,\n d.value_fields_schema,\n )?);\n Ok(export_context)\n });\n Ok(TypedExportDataCollectionBuildOutput {\n setup_key: table_id,\n desired_setup_state: setup_state,\n export_context,\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: TableId,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n Ok(SetupStatus::new(desired, existing))\n }\n\n fn 
check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(check_table_compatibility(\n &desired.columns,\n &existing.columns,\n ))\n }\n\n fn describe_resource(&self, key: &TableId) -> Result {\n Ok(format!(\"Postgres table {}\", key.table_name))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mut_groups_by_db_ref = HashMap::new();\n for mutation in mutations.iter() {\n mut_groups_by_db_ref\n .entry(mutation.export_context.db_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mut_groups in mut_groups_by_db_ref.values() {\n let db_pool = &mut_groups\n .first()\n .ok_or_else(|| anyhow!(\"empty group\"))?\n .export_context\n .db_pool;\n let mut txn = db_pool.begin().await?;\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .upsert(&mut_group.mutation.upserts, &mut txn)\n .await?;\n }\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .delete(&mut_group.mutation.deletes, &mut txn)\n .await?;\n }\n txn.commit().await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n for change in changes.iter() {\n let db_pool = get_db_pool(change.key.database.as_ref(), &context.auth_registry).await?;\n change\n .setup_status\n .apply_change(&db_pool, &change.key.table_name)\n .await?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/evaluator.rs", "use crate::prelude::*;\n\nuse anyhow::{Context, Ok};\nuse futures::future::try_join_all;\n\nuse crate::base::value::EstimatedByteSize;\nuse crate::builder::{AnalyzedTransientFlow, plan::*};\nuse crate::py::IntoPyResult;\nuse crate::{\n base::{schema, value},\n utils::immutable::RefList,\n};\n\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, evaluate_with_cell};\n\n#[derive(Debug)]\npub struct ScopeValueBuilder {\n // TODO: Share the same lock for values produced in the same execution scope, for stricter 
atomicity.\n pub fields: Vec>>,\n}\n\nimpl value::EstimatedByteSize for ScopeValueBuilder {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }\n}\n\nimpl From<&ScopeValueBuilder> for value::ScopeValue {\n fn from(val: &ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .iter()\n .map(|f| value::Value::from_alternative_ref(f.get().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl From for value::ScopeValue {\n fn from(val: ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .into_iter()\n .map(|f| value::Value::from_alternative(f.into_inner().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl ScopeValueBuilder {\n fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize_with(num_fields, OnceLock::new);\n Self { fields }\n }\n\n fn augmented_from(source: &value::ScopeValue, schema: &schema::TableSchema) -> Result {\n let val_index_base = if schema.has_key() { 1 } else { 0 };\n let len = schema.row.fields.len() - val_index_base;\n\n let mut builder = Self::new(len);\n\n let value::ScopeValue(source_fields) = source;\n for ((v, t), r) in source_fields\n .fields\n .iter()\n .zip(schema.row.fields[val_index_base..(val_index_base + len)].iter())\n .zip(&mut builder.fields)\n {\n r.set(augmented_value(v, &t.value_type.typ)?)\n .into_py_result()?;\n }\n Ok(builder)\n }\n}\n\nfn augmented_value(\n val: &value::Value,\n val_type: &schema::ValueType,\n) -> Result> {\n let value = match (val, val_type) {\n (value::Value::Null, _) => value::Value::Null,\n (value::Value::Basic(v), _) => value::Value::Basic(v.clone()),\n (value::Value::Struct(v), schema::ValueType::Struct(t)) => {\n value::Value::Struct(value::FieldValues {\n fields: v\n .fields\n .iter()\n .enumerate()\n .map(|(i, v)| augmented_value(v, &t.fields[i].value_type.typ))\n .collect::>>()?,\n })\n }\n 
(value::Value::UTable(v), schema::ValueType::Table(t)) => value::Value::UTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (value::Value::KTable(v), schema::ValueType::Table(t)) => value::Value::KTable(\n v.iter()\n .map(|(k, v)| Ok((k.clone(), ScopeValueBuilder::augmented_from(v, t)?)))\n .collect::>>()?,\n ),\n (value::Value::LTable(v), schema::ValueType::Table(t)) => value::Value::LTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (val, _) => bail!(\"Value kind doesn't match the type {val_type}: {val:?}\"),\n };\n Ok(value)\n}\n\nenum ScopeKey<'a> {\n /// For root struct and UTable.\n None,\n /// For KTable row.\n MapKey(&'a value::KeyValue),\n /// For LTable row.\n ListIndex(usize),\n}\n\nimpl<'a> ScopeKey<'a> {\n pub fn key(&self) -> Option> {\n match self {\n ScopeKey::None => None,\n ScopeKey::MapKey(k) => Some(Cow::Borrowed(k)),\n ScopeKey::ListIndex(i) => Some(Cow::Owned(value::KeyValue::Int64(*i as i64))),\n }\n }\n\n pub fn value_field_index_base(&self) -> u32 {\n match *self {\n ScopeKey::None => 0,\n ScopeKey::MapKey(_) => 1,\n ScopeKey::ListIndex(_) => 0,\n }\n }\n}\n\nimpl std::fmt::Display for ScopeKey<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ScopeKey::None => write!(f, \"()\"),\n ScopeKey::MapKey(k) => write!(f, \"{{{k}}}\"),\n ScopeKey::ListIndex(i) => write!(f, \"[{i}]\"),\n }\n }\n}\n\nstruct ScopeEntry<'a> {\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n collected_values: Vec>>,\n}\n\nimpl<'a> ScopeEntry<'a> {\n fn new(\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n analyzed_op_scope: &AnalyzedOpScope,\n ) -> Self {\n let mut collected_values = Vec::with_capacity(analyzed_op_scope.collector_len);\n collected_values.resize_with(analyzed_op_scope.collector_len, Default::default);\n\n Self {\n key,\n value,\n schema,\n 
collected_values,\n }\n }\n\n fn get_local_field_schema<'b>(\n schema: &'b schema::StructSchema,\n indices: &[u32],\n ) -> Result<&'b schema::FieldSchema> {\n let field_idx = indices[0] as usize;\n let field_schema = &schema.fields[field_idx];\n let result = if indices.len() == 1 {\n field_schema\n } else {\n let struct_field_schema = match &field_schema.value_type.typ {\n schema::ValueType::Struct(s) => s,\n _ => bail!(\"Expect struct field\"),\n };\n Self::get_local_field_schema(struct_field_schema, &indices[1..])?\n };\n Ok(result)\n }\n\n fn get_local_key_field<'b>(\n key_val: &'b value::KeyValue,\n indices: &'_ [u32],\n ) -> &'b value::KeyValue {\n if indices.is_empty() {\n key_val\n } else if let value::KeyValue::Struct(fields) = key_val {\n Self::get_local_key_field(&fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_local_field<'b>(\n val: &'b value::Value,\n indices: &'_ [u32],\n ) -> &'b value::Value {\n if indices.is_empty() {\n val\n } else if let value::Value::Struct(fields) = val {\n Self::get_local_field(&fields.fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_value_field_builder(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> &value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n Self::get_local_field(val, &field_ref.fields_idx[1..])\n }\n\n fn get_field(&self, field_ref: &AnalyzedLocalFieldReference) -> value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n if first_index < index_base {\n let key_val = self.key.key().unwrap().into_owned();\n let key_part = Self::get_local_key_field(&key_val, &field_ref.fields_idx[1..]);\n key_part.clone().into()\n } else {\n let val = 
self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n let val_part = Self::get_local_field(val, &field_ref.fields_idx[1..]);\n value::Value::from_alternative_ref(val_part)\n }\n }\n\n fn get_field_schema(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> Result<&schema::FieldSchema> {\n Ok(Self::get_local_field_schema(\n self.schema,\n &field_ref.fields_idx,\n )?)\n }\n\n fn define_field_w_builder(\n &self,\n output_field: &AnalyzedOpOutput,\n val: value::Value,\n ) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let index_base = self.key.value_field_index_base() as usize;\n self.value.fields[field_index - index_base].set(val).map_err(|_| {\n anyhow!(\"Field {field_index} for scope is already set, violating single-definition rule.\")\n })?;\n Ok(())\n }\n\n fn define_field(&self, output_field: &AnalyzedOpOutput, val: &value::Value) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let field_schema = &self.schema.fields[field_index];\n let val = augmented_value(val, &field_schema.value_type.typ)?;\n self.define_field_w_builder(output_field, val)?;\n Ok(())\n }\n}\n\nfn assemble_value(\n value_mapping: &AnalyzedValueMapping,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n) -> value::Value {\n match value_mapping {\n AnalyzedValueMapping::Constant { value } => value.clone(),\n AnalyzedValueMapping::Field(field_ref) => scoped_entries\n .headn(field_ref.scope_up_level as usize)\n .unwrap()\n .get_field(&field_ref.local),\n AnalyzedValueMapping::Struct(mapping) => {\n let fields = mapping\n .fields\n .iter()\n .map(|f| assemble_value(f, scoped_entries))\n .collect();\n value::Value::Struct(value::FieldValues { fields })\n }\n }\n}\n\nfn assemble_input_values<'a>(\n value_mappings: &'a [AnalyzedValueMapping],\n scoped_entries: RefList<'a, &ScopeEntry<'a>>,\n) -> impl Iterator + 'a {\n value_mappings\n .iter()\n .map(move |value_mapping| assemble_value(value_mapping, 
scoped_entries))\n}\n\nasync fn evaluate_child_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n child_scope_entry: ScopeEntry<'_>,\n concurrency_controller: &concur_control::ConcurrencyController,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let _permit = concurrency_controller\n .acquire(Some(|| {\n child_scope_entry\n .value\n .fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }))\n .await?;\n evaluate_op_scope(op_scope, scoped_entries.prepend(&child_scope_entry), memory)\n .await\n .with_context(|| {\n format!(\n \"Evaluating in scope with key {}\",\n match child_scope_entry.key.key() {\n Some(k) => k.to_string(),\n None => \"()\".to_string(),\n }\n )\n })\n}\n\nasync fn evaluate_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let head_scope = *scoped_entries.head().unwrap();\n for reactive_op in op_scope.reactive_ops.iter() {\n match reactive_op {\n AnalyzedReactiveOp::Transform(op) => {\n let mut input_values = Vec::with_capacity(op.inputs.len());\n input_values\n .extend(assemble_input_values(&op.inputs, scoped_entries).collect::>());\n let output_value_cell = memory.get_cache_entry(\n || {\n Ok(op\n .function_exec_info\n .fingerprinter\n .clone()\n .with(&input_values)?\n .into_fingerprint())\n },\n &op.function_exec_info.output_type,\n /*ttl=*/ None,\n )?;\n let output_value = evaluate_with_cell(output_value_cell.as_ref(), move || {\n op.executor.evaluate(input_values)\n })\n .await\n .with_context(|| format!(\"Evaluating Transform op `{}`\", op.name,))?;\n head_scope.define_field(&op.output, &output_value)?;\n }\n\n AnalyzedReactiveOp::ForEach(op) => {\n let target_field_schema = head_scope.get_field_schema(&op.local_field_ref)?;\n let table_schema = match &target_field_schema.value_type.typ {\n schema::ValueType::Table(cs) => cs,\n _ => bail!(\"Expect target field to be a table\"),\n 
};\n\n let target_field = head_scope.get_value_field_builder(&op.local_field_ref);\n let task_futs = match target_field {\n value::Value::UTable(v) => v\n .iter()\n .map(|item| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::None,\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::KTable(v) => v\n .iter()\n .map(|(k, v)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::MapKey(k),\n v,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::LTable(v) => v\n .iter()\n .enumerate()\n .map(|(i, item)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::ListIndex(i),\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n _ => {\n bail!(\"Target field type is expected to be a table\");\n }\n };\n try_join_all(task_futs)\n .await\n .with_context(|| format!(\"Evaluating ForEach op `{}`\", op.name,))?;\n }\n\n AnalyzedReactiveOp::Collect(op) => {\n let mut field_values = Vec::with_capacity(\n op.input.fields.len() + if op.has_auto_uuid_field { 1 } else { 0 },\n );\n let field_values_iter = assemble_input_values(&op.input.fields, scoped_entries);\n if op.has_auto_uuid_field {\n field_values.push(value::Value::Null);\n field_values.extend(field_values_iter);\n let uuid = memory.next_uuid(\n op.fingerprinter\n .clone()\n .with(&field_values[1..])?\n .into_fingerprint(),\n )?;\n field_values[0] = value::Value::Basic(value::BasicValue::Uuid(uuid));\n } else {\n field_values.extend(field_values_iter);\n };\n let collector_entry = scoped_entries\n .headn(op.collector_ref.scope_up_level as usize)\n .ok_or_else(|| anyhow::anyhow!(\"Collector level out of bound\"))?;\n {\n let mut collected_records = collector_entry.collected_values\n 
[op.collector_ref.local.collector_idx as usize]\n .lock()\n .unwrap();\n collected_records.push(value::FieldValues {\n fields: field_values,\n });\n }\n }\n }\n }\n Ok(())\n}\n\npub struct SourceRowEvaluationContext<'a> {\n pub plan: &'a ExecutionPlan,\n pub import_op: &'a AnalyzedImportOp,\n pub schema: &'a schema::FlowSchema,\n pub key: &'a value::KeyValue,\n pub import_op_idx: usize,\n}\n\n#[derive(Debug)]\npub struct EvaluateSourceEntryOutput {\n pub data_scope: ScopeValueBuilder,\n pub collected_values: Vec>,\n}\n\npub async fn evaluate_source_entry(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_value: value::FieldValues,\n memory: &EvaluationMemory,\n) -> Result {\n let _permit = src_eval_ctx\n .import_op\n .concurrency_controller\n .acquire_bytes_with_reservation(|| source_value.estimated_byte_size())\n .await?;\n let root_schema = &src_eval_ctx.schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &src_eval_ctx.plan.op_scope,\n );\n\n let table_schema = match &root_schema.fields[src_eval_ctx.import_op.output.field_idx as usize]\n .value_type\n .typ\n {\n schema::ValueType::Table(cs) => cs,\n _ => {\n bail!(\"Expect source output to be a table\")\n }\n };\n\n let scope_value =\n ScopeValueBuilder::augmented_from(&value::ScopeValue(source_value), table_schema)?;\n root_scope_entry.define_field_w_builder(\n &src_eval_ctx.import_op.output,\n value::Value::KTable(BTreeMap::from([(src_eval_ctx.key.clone(), scope_value)])),\n )?;\n\n evaluate_op_scope(\n &src_eval_ctx.plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n memory,\n )\n .await?;\n let collected_values = root_scope_entry\n .collected_values\n .into_iter()\n .map(|v| v.into_inner().unwrap())\n .collect::>();\n Ok(EvaluateSourceEntryOutput {\n data_scope: root_scope_value,\n collected_values,\n })\n}\n\npub async fn evaluate_transient_flow(\n flow: 
&AnalyzedTransientFlow,\n input_values: &Vec,\n) -> Result {\n let root_schema = &flow.data_schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &flow.execution_plan.op_scope,\n );\n\n if input_values.len() != flow.execution_plan.input_fields.len() {\n bail!(\n \"Input values length mismatch: expect {}, got {}\",\n flow.execution_plan.input_fields.len(),\n input_values.len()\n );\n }\n for (field, value) in flow.execution_plan.input_fields.iter().zip(input_values) {\n root_scope_entry.define_field(field, value)?;\n }\n let eval_memory = EvaluationMemory::new(\n chrono::Utc::now(),\n None,\n EvaluationMemoryOptions {\n enable_cache: false,\n evaluation_only: true,\n },\n );\n evaluate_op_scope(\n &flow.execution_plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n &eval_memory,\n )\n .await?;\n let output_value = assemble_value(\n &flow.execution_plan.output_value,\n RefList::Nil.prepend(&root_scope_entry),\n );\n Ok(output_value)\n}\n"], ["/cocoindex/src/base/spec.rs", "use crate::prelude::*;\n\nuse super::schema::{EnrichedValueType, FieldSchema};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\nuse std::ops::Deref;\n\n/// OutputMode enum for displaying spec info in different granularity\n#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum OutputMode {\n Concise,\n Verbose,\n}\n\n/// Formatting spec per output mode\npub trait SpecFormatter {\n fn format(&self, mode: OutputMode) -> String;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum SpecString {\n /// The value comes from the environment variable.\n Env(String),\n /// The value is defined by the literal string.\n #[serde(untagged)]\n Literal(String),\n}\n\npub type ScopeName = String;\n\n/// Used to identify a data field within a flow.\n/// Within a flow, in each 
specific scope, each field name must be unique.\n/// - A field is defined by `outputs` of an operation. There must be exactly one definition for each field.\n/// - A field can be used as an input for multiple operations.\npub type FieldName = String;\n\npub const ROOT_SCOPE_NAME: &str = \"_root\";\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]\npub struct FieldPath(pub Vec);\n\nimpl Deref for FieldPath {\n type Target = Vec;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl fmt::Display for FieldPath {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.is_empty() {\n write!(f, \"*\")\n } else {\n write!(f, \"{}\", self.join(\".\"))\n }\n }\n}\n\n/// Used to identify an input or output argument for an operator.\n/// Useful to identify different inputs/outputs of the same operation. Usually omitted for operations with the same purpose of input/output.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct OpArgName(pub Option);\n\nimpl fmt::Display for OpArgName {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if let Some(arg_name) = &self.0 {\n write!(f, \"${arg_name}\")\n } else {\n write!(f, \"?\")\n }\n }\n}\n\nimpl OpArgName {\n pub fn is_unnamed(&self) -> bool {\n self.0.is_none()\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct NamedSpec {\n pub name: String,\n\n #[serde(flatten)]\n pub spec: T,\n}\n\nimpl fmt::Display for NamedSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}: {}\", self.name, self.spec)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FieldMapping {\n /// If unspecified, means the current scope.\n /// \"_root\" refers to the top-level scope.\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub scope: Option,\n\n pub field_path: FieldPath,\n}\n\nimpl fmt::Display for FieldMapping {\n fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result {\n let scope = self.scope.as_deref().unwrap_or(\"\");\n write!(\n f,\n \"{}{}\",\n if scope.is_empty() {\n \"\".to_string()\n } else {\n format!(\"{scope}.\")\n },\n self.field_path\n )\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ConstantMapping {\n pub schema: EnrichedValueType,\n pub value: serde_json::Value,\n}\n\nimpl fmt::Display for ConstantMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let value = serde_json::to_string(&self.value).unwrap_or(\"#serde_error\".to_string());\n write!(f, \"{value}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectionMapping {\n pub field: FieldMapping,\n pub scope_name: ScopeName,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StructMapping {\n pub fields: Vec>,\n}\n\nimpl fmt::Display for StructMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let fields = self\n .fields\n .iter()\n .map(|field| field.name.clone())\n .collect::>()\n .join(\",\");\n write!(f, \"{fields}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum ValueMapping {\n Constant(ConstantMapping),\n Field(FieldMapping),\n Struct(StructMapping),\n // TODO: Add support for collections\n}\n\nimpl ValueMapping {\n pub fn is_entire_scope(&self) -> bool {\n match self {\n ValueMapping::Field(FieldMapping {\n scope: None,\n field_path,\n }) => field_path.is_empty(),\n _ => false,\n }\n }\n}\n\nimpl std::fmt::Display for ValueMapping {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n match self {\n ValueMapping::Constant(v) => write!(\n f,\n \"{}\",\n serde_json::to_string(&v.value)\n .unwrap_or_else(|_| \"#(invalid json value)\".to_string())\n ),\n ValueMapping::Field(v) => {\n write!(f, \"{}.{}\", v.scope.as_deref().unwrap_or(\"\"), v.field_path)\n }\n ValueMapping::Struct(v) => write!(\n f,\n \"Struct({})\",\n v.fields\n .iter()\n .map(|f| 
format!(\"{}={}\", f.name, f.spec))\n .collect::>()\n .join(\", \")\n ),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpArgBinding {\n #[serde(default, skip_serializing_if = \"OpArgName::is_unnamed\")]\n pub arg_name: OpArgName,\n\n #[serde(flatten)]\n pub value: ValueMapping,\n}\n\nimpl fmt::Display for OpArgBinding {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.arg_name.is_unnamed() {\n write!(f, \"{}\", self.value)\n } else {\n write!(f, \"{}={}\", self.arg_name, self.value)\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpSpec {\n pub kind: String,\n #[serde(flatten, default)]\n pub spec: serde_json::Map,\n}\n\nimpl SpecFormatter for OpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.kind.clone(),\n OutputMode::Verbose => {\n let spec_str = serde_json::to_string_pretty(&self.spec)\n .map(|s| {\n let lines: Vec<&str> = s.lines().collect();\n if lines.len() < s.lines().count() {\n lines\n .into_iter()\n .chain([\"...\"])\n .collect::>()\n .join(\"\\n \")\n } else {\n lines.join(\"\\n \")\n }\n })\n .unwrap_or(\"#serde_error\".to_string());\n format!(\"{}({})\", self.kind, spec_str)\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ExecutionOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_rows: Option,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_bytes: Option,\n}\n\nimpl ExecutionOptions {\n pub fn get_concur_control_options(&self) -> concur_control::Options {\n concur_control::Options {\n max_inflight_rows: self.max_inflight_rows,\n max_inflight_bytes: self.max_inflight_bytes,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct SourceRefreshOptions {\n pub refresh_interval: Option,\n}\n\nimpl fmt::Display for SourceRefreshOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result {\n let refresh = self\n .refresh_interval\n .map(|d| format!(\"{d:?}\"))\n .unwrap_or(\"none\".to_string());\n write!(f, \"{refresh}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ImportOpSpec {\n pub source: OpSpec,\n\n #[serde(default)]\n pub refresh_options: SourceRefreshOptions,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl SpecFormatter for ImportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let source = self.source.format(mode);\n format!(\"source={}, refresh={}\", source, self.refresh_options)\n }\n}\n\nimpl fmt::Display for ImportOpSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.format(OutputMode::Concise))\n }\n}\n\n/// Transform data using a given operator.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransformOpSpec {\n pub inputs: Vec,\n pub op: OpSpec,\n}\n\nimpl SpecFormatter for TransformOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let inputs = self\n .inputs\n .iter()\n .map(ToString::to_string)\n .collect::>()\n .join(\",\");\n let op_str = self.op.format(mode);\n match mode {\n OutputMode::Concise => format!(\"op={op_str}, inputs={inputs}\"),\n OutputMode::Verbose => format!(\"op={op_str}, inputs=[{inputs}]\"),\n }\n }\n}\n\n/// Apply reactive operations to each row of the input field.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ForEachOpSpec {\n /// Mapping that provides a table to apply reactive operations to.\n pub field_path: FieldPath,\n pub op_scope: ReactiveOpScope,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl ForEachOpSpec {\n pub fn get_label(&self) -> String {\n format!(\"Loop over {}\", self.field_path)\n }\n}\n\nimpl SpecFormatter for ForEachOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.get_label(),\n OutputMode::Verbose => format!(\"field={}\", self.field_path),\n }\n 
}\n}\n\n/// Emit data to a given collector at the given scope.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectOpSpec {\n /// Field values to be collected.\n pub input: StructMapping,\n /// Scope for the collector.\n pub scope_name: ScopeName,\n /// Name of the collector.\n pub collector_name: FieldName,\n /// If specified, the collector will have an automatically generated UUID field with the given name.\n /// The uuid will remain stable when collected input values remain unchanged.\n pub auto_uuid_field: Option,\n}\n\nimpl SpecFormatter for CollectOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let uuid = self.auto_uuid_field.as_deref().unwrap_or(\"none\");\n match mode {\n OutputMode::Concise => {\n format!(\n \"collector={}, input={}, uuid={}\",\n self.collector_name, self.input, uuid\n )\n }\n OutputMode::Verbose => {\n format!(\n \"scope={}, collector={}, input=[{}], uuid={}\",\n self.scope_name, self.collector_name, self.input, uuid\n )\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum VectorSimilarityMetric {\n CosineSimilarity,\n L2Distance,\n InnerProduct,\n}\n\nimpl fmt::Display for VectorSimilarityMetric {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n match self {\n VectorSimilarityMetric::CosineSimilarity => write!(f, \"Cosine\"),\n VectorSimilarityMetric::L2Distance => write!(f, \"L2\"),\n VectorSimilarityMetric::InnerProduct => write!(f, \"InnerProduct\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct VectorIndexDef {\n pub field_name: FieldName,\n pub metric: VectorSimilarityMetric,\n}\n\nimpl fmt::Display for VectorIndexDef {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}:{}\", self.field_name, self.metric)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct IndexOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub 
primary_key_fields: Option>,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n pub vector_indexes: Vec,\n}\n\nimpl IndexOptions {\n pub fn primary_key_fields(&self) -> Result<&[FieldName]> {\n Ok(self\n .primary_key_fields\n .as_ref()\n .ok_or(api_error!(\"Primary key fields are not set\"))?\n .as_ref())\n }\n}\n\nimpl fmt::Display for IndexOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let primary_keys = self\n .primary_key_fields\n .as_ref()\n .map(|p| p.join(\",\"))\n .unwrap_or_default();\n let vector_indexes = self\n .vector_indexes\n .iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\",\");\n write!(f, \"keys={primary_keys}, indexes={vector_indexes}\")\n }\n}\n\n/// Store data to a given sink.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ExportOpSpec {\n pub collector_name: FieldName,\n pub target: OpSpec,\n pub index_options: IndexOptions,\n pub setup_by_user: bool,\n}\n\nimpl SpecFormatter for ExportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let target_str = self.target.format(mode);\n let base = format!(\n \"collector={}, target={}, {}\",\n self.collector_name, target_str, self.index_options\n );\n match mode {\n OutputMode::Concise => base,\n OutputMode::Verbose => format!(\"{}, setup_by_user={}\", base, self.setup_by_user),\n }\n }\n}\n\n/// A reactive operation reacts on given input values.\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"action\")]\npub enum ReactiveOpSpec {\n Transform(TransformOpSpec),\n ForEach(ForEachOpSpec),\n Collect(CollectOpSpec),\n}\n\nimpl SpecFormatter for ReactiveOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match self {\n ReactiveOpSpec::Transform(t) => format!(\"Transform: {}\", t.format(mode)),\n ReactiveOpSpec::ForEach(fe) => match mode {\n OutputMode::Concise => fe.get_label().to_string(),\n OutputMode::Verbose => format!(\"ForEach: {}\", fe.format(mode)),\n },\n ReactiveOpSpec::Collect(c) => 
format!(\"Collect: {}\", c.format(mode)),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReactiveOpScope {\n pub name: ScopeName,\n pub ops: Vec>,\n // TODO: Suport collectors\n}\n\nimpl fmt::Display for ReactiveOpScope {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"Scope: name={}\", self.name)\n }\n}\n\n/// A flow defines the rule to sync data from given sources to given sinks with given transformations.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowInstanceSpec {\n /// Name of the flow instance.\n pub name: String,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub import_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub reactive_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub export_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub declarations: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransientFlowSpec {\n pub name: String,\n pub input_fields: Vec,\n pub reactive_ops: Vec>,\n pub output_value: ValueMapping,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SimpleSemanticsQueryHandlerSpec {\n pub name: String,\n pub flow_instance_name: String,\n pub export_target_name: String,\n pub query_transform_flow: TransientFlowSpec,\n pub default_similarity_metric: VectorSimilarityMetric,\n}\n\npub struct AuthEntryReference {\n pub key: String,\n _phantom: std::marker::PhantomData,\n}\n\nimpl fmt::Debug for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl fmt::Display for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl Clone for AuthEntryReference {\n fn clone(&self) -> Self {\n Self {\n key: 
self.key.clone(),\n _phantom: std::marker::PhantomData,\n }\n }\n}\n\n#[derive(Serialize, Deserialize)]\nstruct UntypedAuthEntryReference {\n key: T,\n}\n\nimpl Serialize for AuthEntryReference {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n UntypedAuthEntryReference { key: &self.key }.serialize(serializer)\n }\n}\n\nimpl<'de, T> Deserialize<'de> for AuthEntryReference {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let untyped_ref = UntypedAuthEntryReference::::deserialize(deserializer)?;\n Ok(AuthEntryReference {\n key: untyped_ref.key,\n _phantom: std::marker::PhantomData,\n })\n }\n}\n\nimpl PartialEq for AuthEntryReference {\n fn eq(&self, other: &Self) -> bool {\n self.key == other.key\n }\n}\n\nimpl Eq for AuthEntryReference {}\n\nimpl std::hash::Hash for AuthEntryReference {\n fn hash(&self, state: &mut H) {\n self.key.hash(state);\n }\n}\n"], ["/cocoindex/src/llm/litellm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_litellm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:4000\".to_string());\n let api_key = std::env::var(\"LITELLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/ops/functions/parse_json.rs", "use crate::ops::sdk::*;\nuse anyhow::Result;\nuse std::collections::HashMap;\nuse std::sync::{Arc, LazyLock};\nuse unicase::UniCase;\n\npub struct Args {\n text: ResolvedOpArg,\n language: Option,\n}\n\ntype ParseFn = fn(&str) -> Result;\nstruct LanguageConfig {\n parse_fn: ParseFn,\n}\n\nfn add_language(\n output: &mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n parse_fn: ParseFn,\n) {\n 
let lang_config = Arc::new(LanguageConfig { parse_fn });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), lang_config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nfn parse_json(text: &str) -> Result {\n Ok(serde_json::from_str(text)?)\n}\n\nstatic PARSE_FN_BY_LANG: LazyLock, Arc>> =\n LazyLock::new(|| {\n let mut map = HashMap::new();\n add_language(&mut map, \"json\", [\".json\"], parse_json);\n map\n });\n\nstruct Executor {\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let lang_config = {\n let language = self.args.language.value(&input)?;\n language\n .optional()\n .map(|v| anyhow::Ok(v.as_str()?.as_ref()))\n .transpose()?\n .and_then(|lang| PARSE_FN_BY_LANG.get(&UniCase::new(lang)))\n };\n let parse_fn = lang_config.map(|c| c.parse_fn).unwrap_or(parse_json);\n let parsed_value = parse_fn(text)?;\n Ok(value::Value::Basic(value::BasicValue::Json(Arc::new(\n parsed_value,\n ))))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = EmptySpec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ParseJson\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a EmptySpec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let output_schema = make_output_type(BasicValueType::Json);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n _spec: EmptySpec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { args }))\n 
}\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n use serde_json::json;\n\n #[tokio::test]\n async fn test_parse_json() {\n let spec = EmptySpec {};\n\n let factory = Arc::new(Factory);\n let json_string_content = r#\"{\"city\": \"Magdeburg\"}\"#;\n let lang_value: Value = \"json\".to_string().into();\n\n let input_args_values = vec![json_string_content.to_string().into(), lang_value.clone()];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Json(arc_json_value)) => {\n let expected_json = json!({\"city\": \"Magdeburg\"});\n assert_eq!(\n *arc_json_value, expected_json,\n \"Parsed JSON value mismatch with specified language\"\n );\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Json), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/setup/driver.rs", "use crate::{\n lib_context::{FlowContext, FlowExecutionContext, LibSetupContext},\n ops::{\n get_optional_executor_factory,\n interface::{ExportTargetFactory, FlowInstanceContext},\n },\n prelude::*,\n};\n\nuse sqlx::PgPool;\nuse std::{\n fmt::{Debug, Display},\n str::FromStr,\n};\n\nuse super::{AllSetupStates, GlobalSetupStatus};\nuse super::{\n CombinedState, DesiredMode, ExistingMode, FlowSetupState, FlowSetupStatus, ObjectSetupStatus,\n ObjectStatus, ResourceIdentifier, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType,\n StateChange, TargetSetupState, db_metadata,\n};\nuse crate::execution::db_tracking_setup;\nuse crate::ops::interface::ExecutorFactory;\nuse std::fmt::Write;\n\nenum MetadataRecordType {\n FlowVersion,\n FlowMetadata,\n TrackingTable,\n 
Target(String),\n}\n\nimpl Display for MetadataRecordType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n MetadataRecordType::FlowVersion => f.write_str(db_metadata::FLOW_VERSION_RESOURCE_TYPE),\n MetadataRecordType::FlowMetadata => write!(f, \"FlowMetadata\"),\n MetadataRecordType::TrackingTable => write!(f, \"TrackingTable\"),\n MetadataRecordType::Target(target_id) => write!(f, \"Target:{target_id}\"),\n }\n }\n}\n\nimpl std::str::FromStr for MetadataRecordType {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n if s == db_metadata::FLOW_VERSION_RESOURCE_TYPE {\n Ok(Self::FlowVersion)\n } else if s == \"FlowMetadata\" {\n Ok(Self::FlowMetadata)\n } else if s == \"TrackingTable\" {\n Ok(Self::TrackingTable)\n } else if let Some(target_id) = s.strip_prefix(\"Target:\") {\n Ok(Self::Target(target_id.to_string()))\n } else {\n anyhow::bail!(\"Invalid MetadataRecordType string: {}\", s)\n }\n }\n}\n\nfn from_metadata_record(\n state: Option,\n staging_changes: sqlx::types::Json>>,\n legacy_state_key: Option,\n) -> Result> {\n let current: Option = state.map(serde_json::from_value).transpose()?;\n let staging: Vec> = (staging_changes.0.into_iter())\n .map(|sc| -> Result<_> {\n Ok(match sc {\n StateChange::Upsert(v) => StateChange::Upsert(serde_json::from_value(v)?),\n StateChange::Delete => StateChange::Delete,\n })\n })\n .collect::>()?;\n Ok(CombinedState {\n current,\n staging,\n legacy_state_key,\n })\n}\n\nfn get_export_target_factory(\n target_type: &str,\n) -> Option> {\n match get_optional_executor_factory(target_type) {\n Some(ExecutorFactory::ExportTarget(factory)) => Some(factory),\n _ => None,\n }\n}\n\npub async fn get_existing_setup_state(pool: &PgPool) -> Result> {\n let setup_metadata_records = db_metadata::read_setup_metadata(pool).await?;\n\n let setup_metadata_records = if let Some(records) = setup_metadata_records {\n records\n } else {\n return Ok(AllSetupStates::default());\n };\n\n // 
Group setup metadata records by flow name\n let setup_metadata_records = setup_metadata_records.into_iter().fold(\n BTreeMap::>::new(),\n |mut acc, record| {\n acc.entry(record.flow_name.clone())\n .or_default()\n .push(record);\n acc\n },\n );\n\n let flows = setup_metadata_records\n .into_iter()\n .map(|(flow_name, metadata_records)| -> anyhow::Result<_> {\n let mut flow_ss = FlowSetupState::default();\n for metadata_record in metadata_records {\n let state = metadata_record.state;\n let staging_changes = metadata_record.staging_changes;\n match MetadataRecordType::from_str(&metadata_record.resource_type)? {\n MetadataRecordType::FlowVersion => {\n flow_ss.seen_flow_metadata_version =\n db_metadata::parse_flow_version(&state);\n }\n MetadataRecordType::FlowMetadata => {\n flow_ss.metadata = from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::TrackingTable => {\n flow_ss.tracking_table =\n from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::Target(target_type) => {\n let normalized_key = {\n if let Some(factory) = get_export_target_factory(&target_type) {\n factory.normalize_setup_key(&metadata_record.key)?\n } else {\n metadata_record.key.clone()\n }\n };\n let combined_state = from_metadata_record(\n state,\n staging_changes,\n (normalized_key != metadata_record.key).then_some(metadata_record.key),\n )?;\n flow_ss.targets.insert(\n super::ResourceIdentifier {\n key: normalized_key,\n target_kind: target_type,\n },\n combined_state,\n );\n }\n }\n }\n Ok((flow_name, flow_ss))\n })\n .collect::>()?;\n\n Ok(AllSetupStates {\n has_metadata_table: true,\n flows,\n })\n}\n\nfn diff_state(\n existing_state: Option<&E>,\n desired_state: Option<&D>,\n diff: impl Fn(Option<&E>, &D) -> Option>,\n) -> Option>\nwhere\n E: PartialEq,\n{\n match (existing_state, desired_state) {\n (None, None) => None,\n (Some(_), None) => Some(StateChange::Delete),\n (existing_state, Some(desired_state)) => {\n if existing_state.map(|e| e 
== desired_state).unwrap_or(false) {\n None\n } else {\n diff(existing_state, desired_state)\n }\n }\n }\n}\n\nfn to_object_status(existing: Option, desired: Option) -> Option {\n Some(match (&existing, &desired) {\n (Some(_), None) => ObjectStatus::Deleted,\n (None, Some(_)) => ObjectStatus::New,\n (Some(_), Some(_)) => ObjectStatus::Existing,\n (None, None) => return None,\n })\n}\n\n#[derive(Debug, Default)]\nstruct GroupedResourceStates {\n desired: Option,\n existing: CombinedState,\n}\n\nfn group_resource_states<'a>(\n desired: impl Iterator,\n existing: impl Iterator)>,\n) -> Result> {\n let mut grouped: IndexMap<&'a ResourceIdentifier, GroupedResourceStates> = desired\n .into_iter()\n .map(|(key, state)| {\n (\n key,\n GroupedResourceStates {\n desired: Some(state.clone()),\n existing: CombinedState::default(),\n },\n )\n })\n .collect();\n for (key, state) in existing {\n let entry = grouped.entry(key);\n if state.current.is_some() {\n if let indexmap::map::Entry::Occupied(entry) = &entry {\n if entry.get().existing.current.is_some() {\n bail!(\"Duplicate existing state for key: {}\", entry.key());\n }\n }\n }\n let entry = entry.or_default();\n if let Some(current) = &state.current {\n entry.existing.current = Some(current.clone());\n }\n if let Some(legacy_state_key) = &state.legacy_state_key {\n if entry\n .existing\n .legacy_state_key\n .as_ref()\n .is_some_and(|v| v != legacy_state_key)\n {\n warn!(\n \"inconsistent legacy key: {:?}, {:?}\",\n key, entry.existing.legacy_state_key\n );\n }\n entry.existing.legacy_state_key = Some(legacy_state_key.clone());\n }\n for s in state.staging.iter() {\n match s {\n StateChange::Upsert(v) => {\n entry.existing.staging.push(StateChange::Upsert(v.clone()))\n }\n StateChange::Delete => entry.existing.staging.push(StateChange::Delete),\n }\n }\n }\n Ok(grouped)\n}\n\npub async fn check_flow_setup_status(\n desired_state: Option<&FlowSetupState>,\n existing_state: Option<&FlowSetupState>,\n flow_instance_ctx: 
&Arc,\n) -> Result {\n let metadata_change = diff_state(\n existing_state.map(|e| &e.metadata),\n desired_state.map(|d| &d.metadata),\n |_, desired_state| Some(StateChange::Upsert(desired_state.clone())),\n );\n\n let new_source_ids = desired_state\n .iter()\n .flat_map(|d| d.metadata.sources.values().map(|v| v.source_id))\n .collect::>();\n let tracking_table_change = db_tracking_setup::TrackingTableSetupStatus::new(\n desired_state.map(|d| &d.tracking_table),\n &existing_state\n .map(|e| Cow::Borrowed(&e.tracking_table))\n .unwrap_or_default(),\n (existing_state.iter())\n .flat_map(|state| state.metadata.possible_versions())\n .flat_map(|metadata| {\n metadata\n .sources\n .values()\n .map(|v| v.source_id)\n .filter(|id| !new_source_ids.contains(id))\n })\n .collect::>()\n .into_iter()\n .collect(),\n );\n\n let mut target_resources = Vec::new();\n let mut unknown_resources = Vec::new();\n\n let grouped_target_resources = group_resource_states(\n desired_state.iter().flat_map(|d| d.targets.iter()),\n existing_state.iter().flat_map(|e| e.targets.iter()),\n )?;\n for (resource_id, v) in grouped_target_resources.into_iter() {\n let factory = match get_export_target_factory(&resource_id.target_kind) {\n Some(factory) => factory,\n None => {\n unknown_resources.push(resource_id.clone());\n continue;\n }\n };\n let state = v.desired.clone();\n let target_state = v\n .desired\n .and_then(|state| (!state.common.setup_by_user).then_some(state.state));\n let existing_without_setup_by_user = CombinedState {\n current: v\n .existing\n .current\n .and_then(|s| s.state_unless_setup_by_user()),\n staging: v\n .existing\n .staging\n .into_iter()\n .filter_map(|s| match s {\n StateChange::Upsert(s) => {\n s.state_unless_setup_by_user().map(StateChange::Upsert)\n }\n StateChange::Delete => Some(StateChange::Delete),\n })\n .collect(),\n legacy_state_key: v.existing.legacy_state_key.clone(),\n };\n let never_setup_by_sys = target_state.is_none()\n && 
existing_without_setup_by_user.current.is_none()\n && existing_without_setup_by_user.staging.is_empty();\n let setup_status = if never_setup_by_sys {\n None\n } else {\n Some(\n factory\n .check_setup_status(\n &resource_id.key,\n target_state,\n existing_without_setup_by_user,\n flow_instance_ctx.clone(),\n )\n .await?,\n )\n };\n target_resources.push(ResourceSetupInfo {\n key: resource_id.clone(),\n state,\n description: factory.describe_resource(&resource_id.key)?,\n setup_status,\n legacy_key: v\n .existing\n .legacy_state_key\n .map(|legacy_state_key| ResourceIdentifier {\n target_kind: resource_id.target_kind.clone(),\n key: legacy_state_key,\n }),\n });\n }\n Ok(FlowSetupStatus {\n status: to_object_status(existing_state, desired_state),\n seen_flow_metadata_version: existing_state.and_then(|s| s.seen_flow_metadata_version),\n metadata_change,\n tracking_table: tracking_table_change.map(|c| c.into_setup_info()),\n target_resources,\n unknown_resources,\n })\n}\n\nstruct ResourceSetupChangeItem<'a, K: 'a, C: ResourceSetupStatus> {\n key: &'a K,\n setup_status: &'a C,\n}\n\nasync fn maybe_update_resource_setup<\n 'a,\n K: 'a,\n S: 'a,\n C: ResourceSetupStatus,\n ChangeApplierResultFut: Future>,\n>(\n resource_kind: &str,\n write: &mut (dyn std::io::Write + Send),\n resources: impl Iterator>,\n apply_change: impl FnOnce(Vec>) -> ChangeApplierResultFut,\n) -> Result<()> {\n let mut changes = Vec::new();\n for resource in resources {\n if let Some(setup_status) = &resource.setup_status {\n if setup_status.change_type() != SetupChangeType::NoChange {\n changes.push(ResourceSetupChangeItem {\n key: &resource.key,\n setup_status,\n });\n writeln!(write, \"{}:\", resource.description)?;\n for change in setup_status.describe_changes() {\n match change {\n setup::ChangeDescription::Action(action) => {\n writeln!(write, \" - {action}\")?;\n }\n setup::ChangeDescription::Note(_) => {}\n }\n }\n }\n }\n }\n if !changes.is_empty() {\n write!(write, \"Pushing change for 
{resource_kind}...\")?;\n apply_change(changes).await?;\n writeln!(write, \"DONE\")?;\n }\n Ok(())\n}\n\nasync fn apply_changes_for_flow(\n write: &mut (dyn std::io::Write + Send),\n flow_ctx: &FlowContext,\n flow_status: &FlowSetupStatus,\n existing_setup_state: &mut Option>,\n pool: &PgPool,\n) -> Result<()> {\n let Some(status) = flow_status.status else {\n return Ok(());\n };\n let verb = match status {\n ObjectStatus::New => \"Creating\",\n ObjectStatus::Deleted => \"Deleting\",\n ObjectStatus::Existing => \"Updating resources for \",\n _ => bail!(\"invalid flow status\"),\n };\n write!(write, \"\\n{verb} flow {}:\\n\", flow_ctx.flow_name())?;\n\n let mut update_info =\n HashMap::::new();\n\n if let Some(metadata_change) = &flow_status.metadata_change {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::FlowMetadata.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(metadata_change.desired_state(), None)?,\n );\n }\n if let Some(tracking_table) = &flow_status.tracking_table {\n if tracking_table\n .setup_status\n .as_ref()\n .map(|c| c.change_type() != SetupChangeType::NoChange)\n .unwrap_or_default()\n {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::TrackingTable.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(tracking_table.state.as_ref(), None)?,\n );\n }\n }\n\n for target_resource in &flow_status.target_resources {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(target_resource.key.target_kind.clone()).to_string(),\n target_resource.key.key.clone(),\n ),\n db_metadata::StateUpdateInfo::new(\n target_resource.state.as_ref(),\n target_resource.legacy_key.as_ref().map(|k| {\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(k.target_kind.clone()).to_string(),\n k.key.clone(),\n )\n }),\n )?,\n );\n }\n\n let new_version_id = db_metadata::stage_changes_for_flow(\n 
flow_ctx.flow_name(),\n flow_status.seen_flow_metadata_version,\n &update_info,\n pool,\n )\n .await?;\n\n if let Some(tracking_table) = &flow_status.tracking_table {\n maybe_update_resource_setup(\n \"tracking table\",\n write,\n std::iter::once(tracking_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n }\n\n let mut setup_status_by_target_kind = IndexMap::<&str, Vec<_>>::new();\n for target_resource in &flow_status.target_resources {\n setup_status_by_target_kind\n .entry(target_resource.key.target_kind.as_str())\n .or_default()\n .push(target_resource);\n }\n for (target_kind, resources) in setup_status_by_target_kind.into_iter() {\n maybe_update_resource_setup(\n target_kind,\n write,\n resources.into_iter(),\n |setup_status| async move {\n let factory = get_export_target_factory(target_kind).ok_or_else(|| {\n anyhow::anyhow!(\"No factory found for target kind: {}\", target_kind)\n })?;\n factory\n .apply_setup_changes(\n setup_status\n .into_iter()\n .map(|s| interface::ResourceSetupChangeItem {\n key: &s.key.key,\n setup_status: s.setup_status.as_ref(),\n })\n .collect(),\n flow_ctx.flow.flow_instance_ctx.clone(),\n )\n .await?;\n Ok(())\n },\n )\n .await?;\n }\n\n let is_deletion = status == ObjectStatus::Deleted;\n db_metadata::commit_changes_for_flow(\n flow_ctx.flow_name(),\n new_version_id,\n &update_info,\n is_deletion,\n pool,\n )\n .await?;\n if is_deletion {\n *existing_setup_state = None;\n } else {\n let (existing_metadata, existing_tracking_table, existing_targets) =\n match std::mem::take(existing_setup_state) {\n Some(s) => (Some(s.metadata), Some(s.tracking_table), s.targets),\n None => Default::default(),\n };\n let metadata = CombinedState::from_change(\n existing_metadata,\n flow_status\n .metadata_change\n .as_ref()\n .map(|v| v.desired_state()),\n );\n let tracking_table = CombinedState::from_change(\n existing_tracking_table,\n flow_status.tracking_table.as_ref().map(|c| {\n c.setup_status\n 
.as_ref()\n .and_then(|c| c.desired_state.as_ref())\n }),\n );\n let mut targets = existing_targets;\n for target_resource in &flow_status.target_resources {\n match &target_resource.state {\n Some(state) => {\n targets.insert(\n target_resource.key.clone(),\n CombinedState::from_desired(state.clone()),\n );\n }\n None => {\n targets.shift_remove(&target_resource.key);\n }\n }\n }\n *existing_setup_state = Some(setup::FlowSetupState {\n metadata,\n tracking_table,\n seen_flow_metadata_version: Some(new_version_id),\n targets,\n });\n }\n\n writeln!(write, \"Done for flow {}\", flow_ctx.flow_name())?;\n Ok(())\n}\n\nasync fn apply_global_changes(\n write: &mut (dyn std::io::Write + Send),\n setup_status: &GlobalSetupStatus,\n all_setup_states: &mut AllSetupStates,\n) -> Result<()> {\n maybe_update_resource_setup(\n \"metadata table\",\n write,\n std::iter::once(&setup_status.metadata_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n\n if setup_status\n .metadata_table\n .setup_status\n .as_ref()\n .is_some_and(|c| c.change_type() == SetupChangeType::Create)\n {\n all_setup_states.has_metadata_table = true;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum FlowSetupChangeAction {\n Setup,\n Drop,\n}\npub struct SetupChangeBundle {\n pub action: FlowSetupChangeAction,\n pub flow_names: Vec,\n}\n\nimpl SetupChangeBundle {\n async fn get_flow_setup_status<'a>(\n setup_ctx: &LibSetupContext,\n flow_ctx: &'a FlowContext,\n flow_exec_ctx: &'a FlowExecutionContext,\n action: &FlowSetupChangeAction,\n buffer: &'a mut Option,\n ) -> Result<&'a FlowSetupStatus> {\n let result = match action {\n FlowSetupChangeAction::Setup => &flow_exec_ctx.setup_status,\n FlowSetupChangeAction::Drop => {\n let existing_state = setup_ctx.all_setup_states.flows.get(flow_ctx.flow_name());\n buffer.insert(\n check_flow_setup_status(None, existing_state, &flow_ctx.flow.flow_instance_ctx)\n .await?,\n )\n }\n };\n Ok(result)\n }\n\n pub 
async fn describe(&self, lib_context: &LibContext) -> Result<(String, bool)> {\n let mut text = String::new();\n let mut is_up_to_date = true;\n\n let setup_ctx = lib_context\n .require_persistence_ctx()?\n .setup_ctx\n .read()\n .await;\n let setup_ctx = &*setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup {\n is_up_to_date = is_up_to_date && setup_ctx.global_setup_status.is_up_to_date();\n write!(&mut text, \"{}\", setup_ctx.global_setup_status)?;\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().read().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n\n is_up_to_date = is_up_to_date && setup_status.is_up_to_date();\n write!(\n &mut text,\n \"{}\",\n setup::FormattedFlowSetupStatus(flow_name, setup_status)\n )?;\n }\n Ok((text, is_up_to_date))\n }\n\n pub async fn apply(\n &self,\n lib_context: &LibContext,\n write: &mut (dyn std::io::Write + Send),\n ) -> Result<()> {\n let persistence_ctx = lib_context.require_persistence_ctx()?;\n let mut setup_ctx = persistence_ctx.setup_ctx.write().await;\n let setup_ctx = &mut *setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup\n && !setup_ctx.global_setup_status.is_up_to_date()\n {\n apply_global_changes(\n write,\n &setup_ctx.global_setup_status,\n &mut setup_ctx.all_setup_states,\n )\n .await?;\n setup_ctx.global_setup_status =\n GlobalSetupStatus::from_setup_states(&setup_ctx.all_setup_states);\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n 
.clone()\n };\n let mut flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().write().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n if setup_status.is_up_to_date() {\n continue;\n }\n\n let mut flow_states = setup_ctx.all_setup_states.flows.remove(flow_name);\n apply_changes_for_flow(\n write,\n &flow_ctx,\n setup_status,\n &mut flow_states,\n &persistence_ctx.builtin_db_pool,\n )\n .await?;\n\n flow_exec_ctx\n .update_setup_state(&flow_ctx.flow, flow_states.as_ref())\n .await?;\n if let Some(flow_states) = flow_states {\n setup_ctx\n .all_setup_states\n .flows\n .insert(flow_name.to_string(), flow_states);\n }\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/py/mod.rs", "use crate::execution::evaluator::evaluate_transient_flow;\nuse crate::prelude::*;\n\nuse crate::base::schema::{FieldSchema, ValueType};\nuse crate::base::spec::{NamedSpec, OutputMode, ReactiveOpSpec, SpecFormatter};\nuse crate::lib_context::{clear_lib_context, get_auth_registry, init_lib_context};\nuse crate::ops::py_factory::{PyExportTargetFactory, PyOpArgSchema};\nuse crate::ops::{interface::ExecutorFactory, py_factory::PyFunctionFactory, register_factory};\nuse crate::server::{self, ServerSettings};\nuse crate::settings::Settings;\nuse crate::setup::{self};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::fmt::Write;\nuse std::sync::Arc;\n\nmod convert;\npub(crate) use convert::*;\n\npub struct PythonExecutionContext {\n pub event_loop: Py,\n}\n\nimpl PythonExecutionContext {\n pub fn new(_py: Python<'_>, event_loop: Py) -> Self {\n Self { event_loop }\n }\n}\n\npub trait ToResultWithPyTrace {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result;\n}\n\nimpl ToResultWithPyTrace for Result {\n fn to_result_with_py_trace(self, py: Python<'_>) -> 
anyhow::Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => {\n let mut err_str = format!(\"Error calling Python function: {err}\");\n if let Some(tb) = err.traceback(py) {\n write!(&mut err_str, \"\\n{}\", tb.format()?)?;\n }\n Err(anyhow::anyhow!(err_str))\n }\n }\n }\n}\npub trait IntoPyResult {\n fn into_py_result(self) -> PyResult;\n}\n\nimpl IntoPyResult for Result {\n fn into_py_result(self) -> PyResult {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(PyException::new_err(format!(\"{err:?}\"))),\n }\n }\n}\n\n#[pyfunction]\nfn init(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n init_lib_context(settings.into_inner())?;\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn start_server(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n let server = get_runtime().block_on(server::init_server(\n get_lib_context()?,\n settings.into_inner(),\n ))?;\n get_runtime().spawn(server);\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn stop(py: Python<'_>) -> PyResult<()> {\n py.allow_threads(clear_lib_context);\n Ok(())\n}\n\n#[pyfunction]\nfn register_function_factory(name: String, py_function_factory: Py) -> PyResult<()> {\n let factory = PyFunctionFactory {\n py_function_factory,\n };\n register_factory(name, ExecutorFactory::SimpleFunction(Arc::new(factory))).into_py_result()\n}\n\n#[pyfunction]\nfn register_target_connector(name: String, py_target_connector: Py) -> PyResult<()> {\n let factory = PyExportTargetFactory {\n py_target_connector,\n };\n register_factory(name, ExecutorFactory::ExportTarget(Arc::new(factory))).into_py_result()\n}\n\n#[pyclass]\npub struct IndexUpdateInfo(pub execution::stats::IndexUpdateInfo);\n\n#[pymethods]\nimpl IndexUpdateInfo {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.0)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\n#[pyclass]\npub struct Flow(pub 
Arc);\n\n/// A single line in the rendered spec, with hierarchical children\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpecLine {\n /// The formatted content of the line (e.g., \"Import: name=documents, source=LocalFile\")\n pub content: String,\n /// Child lines in the hierarchy\n pub children: Vec,\n}\n\n/// A rendered specification, grouped by sections\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpec {\n /// List of (section_name, lines) pairs\n pub sections: Vec<(String, Vec)>,\n}\n\n#[pyclass]\npub struct FlowLiveUpdaterUpdates(execution::FlowLiveUpdaterUpdates);\n\n#[pymethods]\nimpl FlowLiveUpdaterUpdates {\n #[getter]\n pub fn active_sources(&self) -> Vec {\n self.0.active_sources.clone()\n }\n\n #[getter]\n pub fn updated_sources(&self) -> Vec {\n self.0.updated_sources.clone()\n }\n}\n\n#[pyclass]\npub struct FlowLiveUpdater(pub Arc);\n\n#[pymethods]\nimpl FlowLiveUpdater {\n #[staticmethod]\n pub fn create<'py>(\n py: Python<'py>,\n flow: &Flow,\n options: Pythonized,\n ) -> PyResult> {\n let flow = flow.0.clone();\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow,\n lib_context.require_builtin_db_pool().into_py_result()?,\n options.into_inner(),\n )\n .await\n .into_py_result()?;\n Ok(Self(Arc::new(live_updater)))\n })\n }\n\n pub fn wait_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(\n py,\n async move { live_updater.wait().await.into_py_result() },\n )\n }\n\n pub fn next_status_updates_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(py, async move {\n let updates = live_updater.next_status_updates().await.into_py_result()?;\n Ok(FlowLiveUpdaterUpdates(updates))\n })\n }\n\n pub fn abort(&self) {\n self.0.abort();\n }\n\n pub fn 
index_update_info(&self) -> IndexUpdateInfo {\n IndexUpdateInfo(self.0.index_update_info())\n }\n}\n\n#[pymethods]\nimpl Flow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.flow.flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn name(&self) -> &str {\n &self.0.flow.flow_instance.name\n }\n\n pub fn evaluate_and_dump(\n &self,\n py: Python<'_>,\n options: Pythonized,\n ) -> PyResult<()> {\n py.allow_threads(|| {\n get_runtime()\n .block_on(async {\n let exec_plan = self.0.flow.get_execution_plan().await?;\n let lib_context = get_lib_context()?;\n let execution_ctx = self.0.use_execution_ctx().await?;\n execution::dumper::evaluate_and_dump(\n &exec_plan,\n &execution_ctx.setup_execution_context,\n &self.0.flow.data_schema,\n options.into_inner(),\n lib_context.require_builtin_db_pool()?,\n )\n .await\n })\n .into_py_result()?;\n Ok(())\n })\n }\n\n #[pyo3(signature = (output_mode=None))]\n pub fn get_spec(&self, output_mode: Option>) -> PyResult {\n let mode = output_mode.map_or(OutputMode::Concise, |m| m.into_inner());\n let spec = &self.0.flow.flow_instance;\n let mut sections: IndexMap> = IndexMap::new();\n\n // Sources\n sections.insert(\n \"Source\".to_string(),\n spec.import_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Import: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Processing\n fn walk(op: &NamedSpec, mode: OutputMode) -> RenderedSpecLine {\n let content = format!(\"{}: {}\", op.name, op.spec.format(mode));\n\n let children = match &op.spec {\n ReactiveOpSpec::ForEach(fe) => fe\n .op_scope\n .ops\n .iter()\n .map(|nested| walk(nested, mode))\n .collect(),\n _ => vec![],\n };\n\n RenderedSpecLine { content, children }\n }\n\n sections.insert(\n \"Processing\".to_string(),\n spec.reactive_ops.iter().map(|op| walk(op, mode)).collect(),\n );\n\n // Targets\n sections.insert(\n \"Targets\".to_string(),\n 
spec.export_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Export: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Declarations\n sections.insert(\n \"Declarations\".to_string(),\n spec.declarations\n .iter()\n .map(|decl| RenderedSpecLine {\n content: format!(\"Declaration: {}\", decl.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n Ok(RenderedSpec {\n sections: sections.into_iter().collect(),\n })\n }\n\n pub fn get_schema(&self) -> Vec<(String, String, String)> {\n let schema = &self.0.flow.data_schema;\n let mut result = Vec::new();\n\n fn process_fields(\n fields: &[FieldSchema],\n prefix: &str,\n result: &mut Vec<(String, String, String)>,\n ) {\n for field in fields {\n let field_name = format!(\"{}{}\", prefix, field.name);\n\n let mut field_type = match &field.value_type.typ {\n ValueType::Basic(basic) => format!(\"{basic}\"),\n ValueType::Table(t) => format!(\"{}\", t.kind),\n ValueType::Struct(_) => \"Struct\".to_string(),\n };\n\n if field.value_type.nullable {\n field_type.push('?');\n }\n\n let attr_str = if field.value_type.attrs.is_empty() {\n String::new()\n } else {\n field\n .value_type\n .attrs\n .keys()\n .map(|k| k.to_string())\n .collect::>()\n .join(\", \")\n };\n\n result.push((field_name.clone(), field_type, attr_str));\n\n match &field.value_type.typ {\n ValueType::Struct(s) => {\n process_fields(&s.fields, &format!(\"{field_name}.\"), result);\n }\n ValueType::Table(t) => {\n process_fields(&t.row.fields, &format!(\"{field_name}[].\"), result);\n }\n ValueType::Basic(_) => {}\n }\n }\n }\n\n process_fields(&schema.schema.fields, \"\", &mut result);\n result\n }\n\n pub fn make_setup_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n\n pub fn make_drop_action(&self) -> SetupChangeBundle {\n 
let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n}\n\n#[pyclass]\npub struct TransientFlow(pub Arc);\n\n#[pymethods]\nimpl TransientFlow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.transient_flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn evaluate_async<'py>(\n &self,\n py: Python<'py>,\n args: Vec>,\n ) -> PyResult> {\n let flow = self.0.clone();\n let input_values: Vec = std::iter::zip(\n self.0.transient_flow_instance.input_fields.iter(),\n args.into_iter(),\n )\n .map(|(input_schema, arg)| value_from_py_object(&input_schema.value_type.typ, &arg))\n .collect::>()?;\n\n future_into_py(py, async move {\n let result = evaluate_transient_flow(&flow, &input_values)\n .await\n .into_py_result()?;\n Python::with_gil(|py| value_to_py_object(py, &result)?.into_py_any(py))\n })\n }\n}\n\n#[pyclass]\npub struct SetupChangeBundle(Arc);\n\n#[pymethods]\nimpl SetupChangeBundle {\n pub fn describe_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n future_into_py(py, async move {\n bundle.describe(&lib_context).await.into_py_result()\n })\n }\n\n pub fn apply_async<'py>(\n &self,\n py: Python<'py>,\n report_to_stdout: bool,\n ) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n\n future_into_py(py, async move {\n let mut stdout = None;\n let mut sink = None;\n bundle\n .apply(\n &lib_context,\n if report_to_stdout {\n stdout.insert(std::io::stdout())\n } else {\n sink.insert(std::io::sink())\n },\n )\n .await\n .into_py_result()\n })\n }\n}\n\n#[pyfunction]\nfn flow_names_with_setup_async(py: Python<'_>) -> PyResult> {\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let setup_ctx = 
lib_context\n .require_persistence_ctx()\n .into_py_result()?\n .setup_ctx\n .read()\n .await;\n let flow_names: Vec = setup_ctx.all_setup_states.flows.keys().cloned().collect();\n PyResult::Ok(flow_names)\n })\n}\n\n#[pyfunction]\nfn make_setup_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn make_drop_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn remove_flow_context(flow_name: String) {\n let lib_context_locked = crate::lib_context::LIB_CONTEXT.read().unwrap();\n if let Some(lib_context) = lib_context_locked.as_ref() {\n lib_context.remove_flow_context(&flow_name)\n }\n}\n\n#[pyfunction]\nfn add_auth_entry(key: String, value: Pythonized) -> PyResult<()> {\n get_auth_registry()\n .add(key, value.into_inner())\n .into_py_result()?;\n Ok(())\n}\n\n#[pyfunction]\nfn seder_roundtrip<'py>(\n py: Python<'py>,\n value: Bound<'py, PyAny>,\n typ: Pythonized,\n) -> PyResult> {\n let typ = typ.into_inner();\n let value = value_from_py_object(&typ, &value)?;\n let value = value::test_util::seder_roundtrip(&value, &typ).into_py_result()?;\n value_to_py_object(py, &value)\n}\n\n/// A Python module implemented in Rust.\n#[pymodule]\n#[pyo3(name = \"_engine\")]\nfn cocoindex_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {\n m.add_function(wrap_pyfunction!(init, m)?)?;\n m.add_function(wrap_pyfunction!(start_server, m)?)?;\n m.add_function(wrap_pyfunction!(stop, m)?)?;\n m.add_function(wrap_pyfunction!(register_function_factory, m)?)?;\n m.add_function(wrap_pyfunction!(register_target_connector, m)?)?;\n m.add_function(wrap_pyfunction!(flow_names_with_setup_async, m)?)?;\n m.add_function(wrap_pyfunction!(make_setup_bundle, m)?)?;\n 
m.add_function(wrap_pyfunction!(make_drop_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(remove_flow_context, m)?)?;\n m.add_function(wrap_pyfunction!(add_auth_entry, m)?)?;\n\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n\n let testutil_module = PyModule::new(m.py(), \"testutil\")?;\n testutil_module.add_function(wrap_pyfunction!(seder_roundtrip, &testutil_module)?)?;\n m.add_submodule(&testutil_module)?;\n\n Ok(())\n}\n"], ["/cocoindex/src/utils/yaml_ser.rs", "use base64::prelude::*;\nuse serde::ser::{self, Serialize};\nuse yaml_rust2::yaml::Yaml;\n\n#[derive(Debug)]\npub struct YamlSerializerError {\n msg: String,\n}\n\nimpl std::fmt::Display for YamlSerializerError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"YamlSerializerError: {}\", self.msg)\n }\n}\n\nimpl std::error::Error for YamlSerializerError {}\n\nimpl ser::Error for YamlSerializerError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n YamlSerializerError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\npub struct YamlSerializer;\n\nimpl YamlSerializer {\n pub fn serialize(value: &T) -> Result\n where\n T: Serialize,\n {\n value.serialize(YamlSerializer)\n }\n}\n\nimpl ser::Serializer for YamlSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n type SerializeSeq = SeqSerializer;\n type SerializeTuple = SeqSerializer;\n type SerializeTupleStruct = SeqSerializer;\n type SerializeTupleVariant = VariantSeqSerializer;\n type SerializeMap = MapSerializer;\n type SerializeStruct = MapSerializer;\n type SerializeStructVariant = VariantMapSerializer;\n\n fn serialize_bool(self, v: bool) -> Result {\n Ok(Yaml::Boolean(v))\n }\n\n fn serialize_i8(self, v: i8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i16(self, v: i16) -> Result 
{\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i32(self, v: i32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i64(self, v: i64) -> Result {\n Ok(Yaml::Integer(v))\n }\n\n fn serialize_u8(self, v: u8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u16(self, v: u16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u32(self, v: u32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u64(self, v: u64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f32(self, v: f32) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f64(self, v: f64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_char(self, v: char) -> Result {\n Ok(Yaml::String(v.to_string()))\n }\n\n fn serialize_str(self, v: &str) -> Result {\n Ok(Yaml::String(v.to_owned()))\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result {\n let encoded = BASE64_STANDARD.encode(v);\n Ok(Yaml::String(encoded))\n }\n\n fn serialize_none(self) -> Result {\n Ok(Yaml::Null)\n }\n\n fn serialize_some(self, value: &T) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_struct(self, _name: &'static str) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result {\n Ok(Yaml::String(variant.to_owned()))\n }\n\n fn serialize_newtype_struct(\n self,\n _name: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n let mut hash = yaml_rust2::yaml::Hash::new();\n hash.insert(Yaml::String(variant.to_owned()), value.serialize(self)?);\n Ok(Yaml::Hash(hash))\n }\n\n 
fn serialize_seq(self, len: Option) -> Result {\n Ok(SeqSerializer {\n vec: Vec::with_capacity(len.unwrap_or(0)),\n })\n }\n\n fn serialize_tuple(self, len: usize) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n len: usize,\n ) -> Result {\n Ok(VariantSeqSerializer {\n variant_name: variant.to_owned(),\n vec: Vec::with_capacity(len),\n })\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n Ok(MapSerializer {\n map: yaml_rust2::yaml::Hash::new(),\n next_key: None,\n })\n }\n\n fn serialize_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_map(Some(len))\n }\n\n fn serialize_struct_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n Ok(VariantMapSerializer {\n variant_name: variant.to_owned(),\n map: yaml_rust2::yaml::Hash::new(),\n })\n }\n}\n\npub struct SeqSerializer {\n vec: Vec,\n}\n\nimpl ser::SerializeSeq for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Array(self.vec))\n }\n}\n\nimpl ser::SerializeTuple for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\nimpl ser::SerializeTupleStruct for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), 
Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\npub struct MapSerializer {\n map: yaml_rust2::yaml::Hash,\n next_key: Option,\n}\n\nimpl ser::SerializeMap for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.next_key = Some(key.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n let key = self.next_key.take().unwrap();\n self.map.insert(key, value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Hash(self.map))\n }\n}\n\nimpl ser::SerializeStruct for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeMap::serialize_entry(self, key, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeMap::end(self)\n }\n}\n\npub struct VariantMapSerializer {\n variant_name: String,\n map: yaml_rust2::yaml::Hash,\n}\n\nimpl ser::SerializeStructVariant for VariantMapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.map.insert(\n Yaml::String(key.to_owned()),\n value.serialize(YamlSerializer)?,\n );\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut outer_map = yaml_rust2::yaml::Hash::new();\n outer_map.insert(Yaml::String(self.variant_name), Yaml::Hash(self.map));\n Ok(Yaml::Hash(outer_map))\n }\n}\n\npub struct VariantSeqSerializer {\n variant_name: String,\n vec: Vec,\n}\n\nimpl ser::SerializeTupleVariant for VariantSeqSerializer {\n type Ok = Yaml;\n type Error = 
YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut map = yaml_rust2::yaml::Hash::new();\n map.insert(Yaml::String(self.variant_name), Yaml::Array(self.vec));\n Ok(Yaml::Hash(map))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use serde::ser::Error as SerdeSerError;\n use serde::{Serialize, Serializer};\n use std::collections::BTreeMap;\n use yaml_rust2::yaml::{Hash, Yaml};\n\n fn assert_yaml_serialization(value: T, expected_yaml: Yaml) {\n let result = YamlSerializer::serialize(&value);\n println!(\"Serialized value: {result:?}, Expected value: {expected_yaml:?}\");\n\n assert!(\n result.is_ok(),\n \"Serialization failed when it should have succeeded. Error: {:?}\",\n result.err()\n );\n assert_eq!(\n result.unwrap(),\n expected_yaml,\n \"Serialized YAML did not match expected YAML.\"\n );\n }\n\n #[test]\n fn test_serialize_bool() {\n assert_yaml_serialization(true, Yaml::Boolean(true));\n assert_yaml_serialization(false, Yaml::Boolean(false));\n }\n\n #[test]\n fn test_serialize_integers() {\n assert_yaml_serialization(42i8, Yaml::Integer(42));\n assert_yaml_serialization(-100i16, Yaml::Integer(-100));\n assert_yaml_serialization(123456i32, Yaml::Integer(123456));\n assert_yaml_serialization(7890123456789i64, Yaml::Integer(7890123456789));\n assert_yaml_serialization(255u8, Yaml::Integer(255));\n assert_yaml_serialization(65535u16, Yaml::Integer(65535));\n assert_yaml_serialization(4000000000u32, Yaml::Integer(4000000000));\n // u64 is serialized as Yaml::Real(String) in your implementation\n assert_yaml_serialization(\n 18446744073709551615u64,\n Yaml::Real(\"18446744073709551615\".to_string()),\n );\n }\n\n #[test]\n fn test_serialize_floats() {\n assert_yaml_serialization(3.14f32, Yaml::Real(\"3.14\".to_string()));\n assert_yaml_serialization(-0.001f64, 
Yaml::Real(\"-0.001\".to_string()));\n assert_yaml_serialization(1.0e10f64, Yaml::Real(\"10000000000\".to_string()));\n }\n\n #[test]\n fn test_serialize_char() {\n assert_yaml_serialization('X', Yaml::String(\"X\".to_string()));\n assert_yaml_serialization('✨', Yaml::String(\"✨\".to_string()));\n }\n\n #[test]\n fn test_serialize_str_and_string() {\n assert_yaml_serialization(\"hello YAML\", Yaml::String(\"hello YAML\".to_string()));\n assert_yaml_serialization(\"\".to_string(), Yaml::String(\"\".to_string()));\n }\n\n #[test]\n fn test_serialize_raw_bytes() {\n let bytes_slice: &[u8] = &[0x48, 0x65, 0x6c, 0x6c, 0x6f]; // \"Hello\"\n let expected = Yaml::Array(vec![\n Yaml::Integer(72),\n Yaml::Integer(101),\n Yaml::Integer(108),\n Yaml::Integer(108),\n Yaml::Integer(111),\n ]);\n assert_yaml_serialization(bytes_slice, expected.clone());\n\n let bytes_vec: Vec = bytes_slice.to_vec();\n assert_yaml_serialization(bytes_vec, expected);\n\n let empty_bytes_slice: &[u8] = &[];\n assert_yaml_serialization(empty_bytes_slice, Yaml::Array(vec![]));\n }\n\n struct MyBytesWrapper<'a>(&'a [u8]);\n\n impl<'a> Serialize for MyBytesWrapper<'a> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: Serializer,\n {\n serializer.serialize_bytes(self.0)\n }\n }\n\n #[test]\n fn test_custom_wrapper_serializes_bytes_as_base64_string() {\n let data: &[u8] = &[72, 101, 108, 108, 111]; // \"Hello\"\n let wrapped_data = MyBytesWrapper(data);\n\n let base64_encoded = BASE64_STANDARD.encode(data);\n let expected_yaml = Yaml::String(base64_encoded);\n\n assert_yaml_serialization(wrapped_data, expected_yaml);\n\n let empty_data: &[u8] = &[];\n let wrapped_empty_data = MyBytesWrapper(empty_data);\n let empty_base64_encoded = BASE64_STANDARD.encode(empty_data);\n let expected_empty_yaml = Yaml::String(empty_base64_encoded);\n assert_yaml_serialization(wrapped_empty_data, expected_empty_yaml);\n }\n\n #[test]\n fn test_serialize_option() {\n let val_none: Option = None;\n 
assert_yaml_serialization(val_none, Yaml::Null);\n\n let val_some: Option = Some(\"has value\".to_string());\n assert_yaml_serialization(val_some, Yaml::String(\"has value\".to_string()));\n }\n\n #[test]\n fn test_serialize_unit() {\n assert_yaml_serialization((), Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_unit_struct() {\n #[derive(Serialize)]\n struct MyUnitStruct;\n\n assert_yaml_serialization(MyUnitStruct, Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_newtype_struct() {\n #[derive(Serialize)]\n struct MyNewtypeStruct(u64);\n\n assert_yaml_serialization(MyNewtypeStruct(12345u64), Yaml::Real(\"12345\".to_string()));\n }\n\n #[test]\n fn test_serialize_seq() {\n let empty_vec: Vec = vec![];\n assert_yaml_serialization(empty_vec, Yaml::Array(vec![]));\n\n let simple_vec = vec![10, 20, 30];\n assert_yaml_serialization(\n simple_vec,\n Yaml::Array(vec![\n Yaml::Integer(10),\n Yaml::Integer(20),\n Yaml::Integer(30),\n ]),\n );\n\n let string_vec = vec![\"a\".to_string(), \"b\".to_string()];\n assert_yaml_serialization(\n string_vec,\n Yaml::Array(vec![\n Yaml::String(\"a\".to_string()),\n Yaml::String(\"b\".to_string()),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple() {\n let tuple_val = (42i32, \"text\", false);\n assert_yaml_serialization(\n tuple_val,\n Yaml::Array(vec![\n Yaml::Integer(42),\n Yaml::String(\"text\".to_string()),\n Yaml::Boolean(false),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple_struct() {\n #[derive(Serialize)]\n struct MyTupleStruct(String, i64);\n\n assert_yaml_serialization(\n MyTupleStruct(\"value\".to_string(), -500),\n Yaml::Array(vec![Yaml::String(\"value\".to_string()), Yaml::Integer(-500)]),\n );\n }\n\n #[test]\n fn test_serialize_map() {\n let mut map = BTreeMap::new(); // BTreeMap for ordered keys, matching yaml::Hash\n map.insert(\"key1\".to_string(), 100);\n map.insert(\"key2\".to_string(), 200);\n\n let mut expected_hash = Hash::new();\n 
expected_hash.insert(Yaml::String(\"key1\".to_string()), Yaml::Integer(100));\n expected_hash.insert(Yaml::String(\"key2\".to_string()), Yaml::Integer(200));\n assert_yaml_serialization(map, Yaml::Hash(expected_hash));\n\n let empty_map: BTreeMap = BTreeMap::new();\n assert_yaml_serialization(empty_map, Yaml::Hash(Hash::new()));\n }\n\n #[derive(Serialize)]\n struct SimpleStruct {\n id: u32,\n name: String,\n is_active: bool,\n }\n\n #[test]\n fn test_serialize_struct() {\n let s = SimpleStruct {\n id: 101,\n name: \"A Struct\".to_string(),\n is_active: true,\n };\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(101));\n expected_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"A Struct\".to_string()),\n );\n expected_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(true));\n assert_yaml_serialization(s, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n struct NestedStruct {\n description: String,\n data: SimpleStruct,\n tags: Vec,\n }\n\n #[test]\n fn test_serialize_nested_struct() {\n let ns = NestedStruct {\n description: \"Contains another struct and a vec\".to_string(),\n data: SimpleStruct {\n id: 202,\n name: \"Inner\".to_string(),\n is_active: false,\n },\n tags: vec![\"nested\".to_string(), \"complex\".to_string()],\n };\n\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(202));\n inner_struct_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"Inner\".to_string()),\n );\n inner_struct_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(false));\n\n let tags_array = Yaml::Array(vec![\n Yaml::String(\"nested\".to_string()),\n Yaml::String(\"complex\".to_string()),\n ]);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"description\".to_string()),\n Yaml::String(\"Contains another struct and a vec\".to_string()),\n );\n 
expected_hash.insert(\n Yaml::String(\"data\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n expected_hash.insert(Yaml::String(\"tags\".to_string()), tags_array);\n\n assert_yaml_serialization(ns, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n enum MyEnum {\n Unit,\n Newtype(i32),\n Tuple(String, bool),\n Struct { field_a: u16, field_b: char },\n }\n\n #[test]\n fn test_serialize_enum_unit_variant() {\n assert_yaml_serialization(MyEnum::Unit, Yaml::String(\"Unit\".to_string()));\n }\n\n #[test]\n fn test_serialize_enum_newtype_variant() {\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"Newtype\".to_string()), Yaml::Integer(999));\n assert_yaml_serialization(MyEnum::Newtype(999), Yaml::Hash(expected_hash));\n }\n\n #[test]\n fn test_serialize_enum_tuple_variant() {\n let mut expected_hash = Hash::new();\n let inner_array = Yaml::Array(vec![\n Yaml::String(\"tuple_data\".to_string()),\n Yaml::Boolean(true),\n ]);\n expected_hash.insert(Yaml::String(\"Tuple\".to_string()), inner_array);\n assert_yaml_serialization(\n MyEnum::Tuple(\"tuple_data\".to_string(), true),\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_serialize_enum_struct_variant() {\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"field_a\".to_string()), Yaml::Integer(123));\n inner_struct_hash.insert(\n Yaml::String(\"field_b\".to_string()),\n Yaml::String(\"Z\".to_string()),\n );\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"Struct\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n assert_yaml_serialization(\n MyEnum::Struct {\n field_a: 123,\n field_b: 'Z',\n },\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_display() {\n let error = YamlSerializerError {\n msg: \"A test error message\".to_string(),\n };\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: A test error message\"\n );\n }\n\n #[test]\n fn 
test_yaml_serializer_error_custom() {\n let error = YamlSerializerError::custom(\"Custom error detail\");\n assert_eq!(error.msg, \"Custom error detail\");\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: Custom error detail\"\n );\n let _err_trait_obj: Box = Box::new(error);\n }\n}\n"], ["/cocoindex/src/ops/sources/local_file.rs", "use async_stream::try_stream;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse log::warn;\nuse std::borrow::Cow;\nuse std::path::Path;\nuse std::{path::PathBuf, sync::Arc};\n\nuse crate::base::field_attrs;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n path: String,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n}\n\nstruct Executor {\n root_path: PathBuf,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, path: impl AsRef + Copy) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(path))\n }\n\n fn is_file_included(&self, path: impl AsRef + Copy) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(path))\n && !self.is_excluded(path)\n }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let root_component_size = self.root_path.components().count();\n let mut dirs = Vec::new();\n dirs.push(Cow::Borrowed(&self.root_path));\n let mut new_dirs = Vec::new();\n try_stream! {\n while let Some(dir) = dirs.pop() {\n let mut entries = tokio::fs::read_dir(dir.as_ref()).await?;\n while let Some(entry) = entries.next_entry().await? 
{\n let path = entry.path();\n let mut path_components = path.components();\n for _ in 0..root_component_size {\n path_components.next();\n }\n let relative_path = path_components.as_path();\n if path.is_dir() {\n if !self.is_excluded(relative_path) {\n new_dirs.push(Cow::Owned(path));\n }\n } else if self.is_file_included(relative_path) {\n let ordinal: Option = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n if let Some(relative_path) = relative_path.to_str() {\n yield vec![PartialSourceRowMetadata {\n key: KeyValue::Str(relative_path.into()),\n ordinal,\n }];\n } else {\n warn!(\"Skipped ill-formed file path: {}\", path.display());\n }\n }\n }\n dirs.extend(new_dirs.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n if !self.is_file_included(key.str_value()?.as_ref()) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let path = self.root_path.join(key.str_value()?.as_ref());\n let ordinal = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n let value = if options.include_value {\n match std::fs::read(path) {\n Ok(content) => {\n let content = if self.binary {\n fields_value!(content)\n } else {\n fields_value!(String::from_utf8_lossy(&content).to_string())\n };\n Some(SourceValue::Existence(content))\n }\n Err(e) if e.kind() == std::io::ErrorKind::NotFound => {\n Some(SourceValue::NonExistence)\n }\n Err(e) => Err(e)?,\n }\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"LocalFile\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = 
StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor {\n root_path: PathBuf::from(spec.path),\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/ops/targets/shared/table_columns.rs", "use crate::{\n ops::sdk::SetupStateCompatibility,\n prelude::*,\n setup::{CombinedState, SetupChangeType},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TableColumnsSchema {\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"key_fields_schema\")]\n pub key_columns: IndexMap,\n\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"value_fields_schema\")]\n pub value_columns: IndexMap,\n}\n\n#[derive(Debug)]\npub enum TableUpsertionAction {\n Create {\n keys: IndexMap,\n values: IndexMap,\n },\n Update {\n columns_to_delete: IndexSet,\n columns_to_upsert: IndexMap,\n },\n}\n\nimpl TableUpsertionAction {\n pub fn is_empty(&self) -> bool {\n match self {\n Self::Create { .. 
} => false,\n Self::Update {\n columns_to_delete,\n columns_to_upsert,\n } => columns_to_delete.is_empty() && columns_to_upsert.is_empty(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct TableMainSetupAction {\n pub drop_existing: bool,\n pub table_upsertion: Option>,\n}\n\nimpl TableMainSetupAction {\n pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n\n let table_upsertion = if existing.always_exists() && !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n .flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n .collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n\n Self {\n drop_existing,\n table_upsertion: Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }\n\n pub fn describe_changes(&self) -> Vec\n where\n T: std::fmt::Display,\n {\n let mut descriptions = vec![];\n 
if self.drop_existing {\n descriptions.push(setup::ChangeDescription::Action(\"Drop table\".to_string()));\n }\n if let Some(table_upsertion) = &self.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create table:\\n key columns: {}\\n value columns: {}\\n\",\n keys.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n values.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n )));\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n if !columns_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete column from table: {}\",\n columns_to_delete.iter().join(\", \"),\n )));\n }\n if !columns_to_upsert.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Add / update columns in table: {}\",\n columns_to_upsert\n .iter()\n .map(|(k, v)| format!(\"{k} {v}\"))\n .join(\", \"),\n )));\n }\n }\n }\n }\n descriptions\n }\n\n pub fn change_type(&self, has_other_update: bool) -> SetupChangeType {\n match (self.drop_existing, &self.table_upsertion) {\n (_, Some(TableUpsertionAction::Create { .. })) => SetupChangeType::Create,\n (_, Some(TableUpsertionAction::Update { .. 
})) => SetupChangeType::Update,\n (true, None) => SetupChangeType::Delete,\n (false, None) => {\n if has_other_update {\n SetupChangeType::Update\n } else {\n SetupChangeType::NoChange\n }\n }\n }\n }\n}\n\npub fn check_table_compatibility(\n desired: &TableColumnsSchema,\n existing: &TableColumnsSchema,\n) -> SetupStateCompatibility {\n let is_key_identical = existing.key_columns == desired.key_columns;\n if is_key_identical {\n let is_value_lossy = existing\n .value_columns\n .iter()\n .any(|(k, v)| desired.value_columns.get(k) != Some(v));\n if is_value_lossy {\n SetupStateCompatibility::PartialCompatible\n } else {\n SetupStateCompatibility::Compatible\n }\n } else {\n SetupStateCompatibility::NotCompatible\n }\n}\n"], ["/cocoindex/src/execution/dumper.rs", "use crate::prelude::*;\n\nuse futures::{StreamExt, future::try_join_all};\nuse itertools::Itertools;\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::path::{Path, PathBuf};\nuse yaml_rust2::YamlEmitter;\n\nuse super::evaluator::SourceRowEvaluationContext;\nuse super::memoization::EvaluationMemoryOptions;\nuse super::row_indexer;\nuse crate::base::{schema, value};\nuse crate::builder::plan::{AnalyzedImportOp, ExecutionPlan};\nuse crate::ops::interface::SourceExecutorListOptions;\nuse crate::utils::yaml_ser::YamlSerializer;\n\n#[derive(Debug, Clone, Deserialize)]\npub struct EvaluateAndDumpOptions {\n pub output_dir: String,\n pub use_cache: bool,\n}\n\nconst FILENAME_PREFIX_MAX_LENGTH: usize = 128;\n\nstruct TargetExportData<'a> {\n schema: &'a Vec,\n // The purpose is to make rows sorted by primary key.\n data: BTreeMap,\n}\n\nimpl Serialize for TargetExportData<'_> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(Some(self.data.len()))?;\n for (_, values) in self.data.iter() {\n seq.serialize_element(&value::TypedFieldsValue {\n schema: self.schema,\n values_iter: values.fields.iter(),\n })?;\n }\n seq.end()\n 
}\n}\n\n#[derive(Serialize)]\nstruct SourceOutputData<'a> {\n key: value::TypedValue<'a>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n exports: Option>>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n error: Option,\n}\n\nstruct Dumper<'a> {\n plan: &'a ExecutionPlan,\n setup_execution_ctx: &'a exec_ctx::FlowSetupExecutionContext,\n schema: &'a schema::FlowSchema,\n pool: &'a PgPool,\n options: EvaluateAndDumpOptions,\n}\n\nimpl<'a> Dumper<'a> {\n async fn evaluate_source_entry<'b>(\n &'a self,\n import_op_idx: usize,\n import_op: &'a AnalyzedImportOp,\n key: &value::KeyValue,\n collected_values_buffer: &'b mut Vec>,\n ) -> Result>>>\n where\n 'a: 'b,\n {\n let data_builder = row_indexer::evaluate_source_entry_with_memory(\n &SourceRowEvaluationContext {\n plan: self.plan,\n import_op,\n schema: self.schema,\n key,\n import_op_idx,\n },\n self.setup_execution_ctx,\n EvaluationMemoryOptions {\n enable_cache: self.options.use_cache,\n evaluation_only: true,\n },\n self.pool,\n )\n .await?;\n\n let data_builder = if let Some(data_builder) = data_builder {\n data_builder\n } else {\n return Ok(None);\n };\n\n *collected_values_buffer = data_builder.collected_values;\n let exports = self\n .plan\n .export_ops\n .iter()\n .map(|export_op| -> Result<_> {\n let collector_idx = export_op.input.collector_idx as usize;\n let entry = (\n export_op.name.as_str(),\n TargetExportData {\n schema: &self.schema.root_op_scope.collectors[collector_idx]\n .spec\n .fields,\n data: collected_values_buffer[collector_idx]\n .iter()\n .map(|v| -> Result<_> {\n let key = row_indexer::extract_primary_key(\n &export_op.primary_key_def,\n v,\n )?;\n Ok((key, v))\n })\n .collect::>()?,\n },\n );\n Ok(entry)\n })\n .collect::>()?;\n Ok(Some(exports))\n }\n\n async fn evaluate_and_dump_source_entry(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n key: value::KeyValue,\n file_path: PathBuf,\n ) -> Result<()> {\n let _permit = import_op\n 
.concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n let mut collected_values_buffer = Vec::new();\n let (exports, error) = match self\n .evaluate_source_entry(import_op_idx, import_op, &key, &mut collected_values_buffer)\n .await\n {\n Ok(exports) => (exports, None),\n Err(e) => (None, Some(format!(\"{e:?}\"))),\n };\n let key_value = value::Value::from(key);\n let file_data = SourceOutputData {\n key: value::TypedValue {\n t: &import_op.primary_key_type,\n v: &key_value,\n },\n exports,\n error,\n };\n\n let yaml_output = {\n let mut yaml_output = String::new();\n let yaml_data = YamlSerializer::serialize(&file_data)?;\n let mut yaml_emitter = YamlEmitter::new(&mut yaml_output);\n yaml_emitter.multiline_strings(true);\n yaml_emitter.compact(true);\n yaml_emitter.dump(&yaml_data)?;\n yaml_output\n };\n tokio::fs::write(file_path, yaml_output).await?;\n\n Ok(())\n }\n\n async fn evaluate_and_dump_for_source(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n ) -> Result<()> {\n let mut keys_by_filename_prefix: IndexMap> = IndexMap::new();\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n while let Some(rows) = rows_stream.next().await {\n for row in rows?.into_iter() {\n let mut s = row\n .key\n .to_strs()\n .into_iter()\n .map(|s| urlencoding::encode(&s).into_owned())\n .join(\":\");\n s.truncate(\n (0..(FILENAME_PREFIX_MAX_LENGTH - import_op.name.as_str().len()))\n .rev()\n .find(|i| s.is_char_boundary(*i))\n .unwrap_or(0),\n );\n keys_by_filename_prefix.entry(s).or_default().push(row.key);\n }\n }\n let output_dir = Path::new(&self.options.output_dir);\n let evaluate_futs =\n keys_by_filename_prefix\n .into_iter()\n .flat_map(|(filename_prefix, keys)| {\n let num_keys = keys.len();\n keys.into_iter().enumerate().map(move |(i, key)| {\n let extra_id = if num_keys > 1 {\n Cow::Owned(format!(\".{i}\"))\n } else {\n Cow::Borrowed(\"\")\n };\n let file_name 
=\n format!(\"{}@{}{}.yaml\", import_op.name, filename_prefix, extra_id);\n let file_path = output_dir.join(Path::new(&file_name));\n self.evaluate_and_dump_source_entry(\n import_op_idx,\n import_op,\n key,\n file_path,\n )\n })\n });\n try_join_all(evaluate_futs).await?;\n Ok(())\n }\n\n async fn evaluate_and_dump(&self) -> Result<()> {\n try_join_all(\n self.plan\n .import_ops\n .iter()\n .enumerate()\n .map(|(idx, import_op)| self.evaluate_and_dump_for_source(idx, import_op)),\n )\n .await?;\n Ok(())\n }\n}\n\npub async fn evaluate_and_dump(\n plan: &ExecutionPlan,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n schema: &schema::FlowSchema,\n options: EvaluateAndDumpOptions,\n pool: &PgPool,\n) -> Result<()> {\n let output_dir = Path::new(&options.output_dir);\n if output_dir.exists() {\n if !output_dir.is_dir() {\n return Err(anyhow::anyhow!(\"The path exists and is not a directory\"));\n }\n } else {\n tokio::fs::create_dir(output_dir).await?;\n }\n\n let dumper = Dumper {\n plan,\n setup_execution_ctx,\n schema,\n pool,\n options,\n };\n dumper.evaluate_and_dump().await\n}\n"], ["/cocoindex/src/setup/db_metadata.rs", "use crate::prelude::*;\n\nuse super::{ResourceSetupInfo, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::utils::db::WriteAction;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\n\nconst SETUP_METADATA_TABLE_NAME: &str = \"cocoindex_setup_metadata\";\npub const FLOW_VERSION_RESOURCE_TYPE: &str = \"__FlowVersion\";\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SetupMetadataRecord {\n pub flow_name: String,\n // e.g. 
\"Flow\", \"SourceTracking\", \"Target:{TargetType}\"\n pub resource_type: String,\n pub key: serde_json::Value,\n pub state: Option,\n pub staging_changes: sqlx::types::Json>>,\n}\n\npub fn parse_flow_version(state: &Option) -> Option {\n match state {\n Some(serde_json::Value::Number(n)) => n.as_u64(),\n _ => None,\n }\n}\n\n/// Returns None if metadata table doesn't exist.\npub async fn read_setup_metadata(pool: &PgPool) -> Result>> {\n let mut db_conn = pool.acquire().await?;\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME}\",\n );\n let metadata = sqlx::query_as(&query_str).fetch_all(&mut *db_conn).await;\n let result = match metadata {\n Ok(metadata) => Some(metadata),\n Err(err) => {\n let exists: Option = sqlx::query_scalar(\n \"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = $1)\",\n )\n .bind(SETUP_METADATA_TABLE_NAME)\n .fetch_one(&mut *db_conn)\n .await?;\n if !exists.unwrap_or(false) {\n None\n } else {\n return Err(err.into());\n }\n }\n };\n Ok(result)\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ResourceTypeKey {\n pub resource_type: String,\n pub key: serde_json::Value,\n}\n\nimpl ResourceTypeKey {\n pub fn new(resource_type: String, key: serde_json::Value) -> Self {\n Self { resource_type, key }\n }\n}\n\nstatic VERSION_RESOURCE_TYPE_ID: LazyLock = LazyLock::new(|| ResourceTypeKey {\n resource_type: FLOW_VERSION_RESOURCE_TYPE.to_string(),\n key: serde_json::Value::Null,\n});\n\nasync fn read_metadata_records_for_flow(\n flow_name: &str,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1\",\n );\n let metadata: Vec = sqlx::query_as(&query_str)\n .bind(flow_name)\n .fetch_all(db_executor)\n .await?;\n let result = metadata\n .into_iter()\n .map(|m| 
{\n (\n ResourceTypeKey {\n resource_type: m.resource_type.clone(),\n key: m.key.clone(),\n },\n m,\n )\n })\n .collect();\n Ok(result)\n}\n\nasync fn read_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT state FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n let state: Option = sqlx::query_scalar(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .fetch_optional(db_executor)\n .await?;\n Ok(state)\n}\n\nasync fn upsert_staging_changes(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n staging_changes: Vec>,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, staging_changes) VALUES ($1, $2, $3, $4)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET staging_changes = $4 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(staging_changes))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn upsert_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n state: &serde_json::Value,\n action: WriteAction,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, state, staging_changes) VALUES ($1, $2, $3, $4, $5)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET state = $4, staging_changes = $5 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n 
sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(state))\n .bind(sqlx::types::Json(Vec::::new()))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn delete_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub struct StateUpdateInfo {\n pub desired_state: Option,\n pub legacy_key: Option,\n}\n\nimpl StateUpdateInfo {\n pub fn new(\n desired_state: Option<&impl Serialize>,\n legacy_key: Option,\n ) -> Result {\n Ok(Self {\n desired_state: desired_state\n .as_ref()\n .map(serde_json::to_value)\n .transpose()?,\n legacy_key,\n })\n }\n}\n\npub async fn stage_changes_for_flow(\n flow_name: &str,\n seen_metadata_version: Option,\n resource_update_info: &HashMap,\n pool: &PgPool,\n) -> Result {\n let mut txn = pool.begin().await?;\n let mut existing_records = read_metadata_records_for_flow(flow_name, &mut *txn).await?;\n let latest_metadata_version = existing_records\n .get(&VERSION_RESOURCE_TYPE_ID)\n .and_then(|m| parse_flow_version(&m.state));\n if seen_metadata_version < latest_metadata_version {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n let new_metadata_version = seen_metadata_version.unwrap_or_default() + 1;\n upsert_state(\n flow_name,\n &VERSION_RESOURCE_TYPE_ID,\n &serde_json::Value::Number(new_metadata_version.into()),\n if latest_metadata_version.is_some() {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n &mut *txn,\n )\n .await?;\n\n for (type_id, update_info) in resource_update_info {\n let existing = 
existing_records.remove(type_id);\n let change = match &update_info.desired_state {\n Some(desired_state) => StateChange::Upsert(desired_state.clone()),\n None => StateChange::Delete,\n };\n let mut new_staging_changes = vec![];\n if let Some(legacy_key) = &update_info.legacy_key {\n if let Some(legacy_record) = existing_records.remove(legacy_key) {\n new_staging_changes.extend(legacy_record.staging_changes.0);\n delete_state(flow_name, legacy_key, &mut *txn).await?;\n }\n }\n let (action, existing_staging_changes) = match existing {\n Some(existing) => {\n let existing_staging_changes = existing.staging_changes.0;\n if existing_staging_changes.iter().all(|c| c != &change) {\n new_staging_changes.push(change);\n }\n (WriteAction::Update, existing_staging_changes)\n }\n None => {\n if update_info.desired_state.is_some() {\n new_staging_changes.push(change);\n }\n (WriteAction::Insert, vec![])\n }\n };\n if !new_staging_changes.is_empty() {\n upsert_staging_changes(\n flow_name,\n type_id,\n [existing_staging_changes, new_staging_changes].concat(),\n &mut *txn,\n action,\n )\n .await?;\n }\n }\n txn.commit().await?;\n Ok(new_metadata_version)\n}\n\npub async fn commit_changes_for_flow(\n flow_name: &str,\n curr_metadata_version: u64,\n state_updates: &HashMap,\n delete_version: bool,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n let latest_metadata_version =\n parse_flow_version(&read_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?);\n if latest_metadata_version != Some(curr_metadata_version) {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n for (type_id, update_info) in state_updates.iter() {\n match &update_info.desired_state {\n Some(desired_state) => {\n upsert_state(\n flow_name,\n type_id,\n desired_state,\n WriteAction::Update,\n &mut *txn,\n )\n .await?;\n }\n None => {\n delete_state(flow_name, type_id, &mut *txn).await?;\n }\n }\n }\n if 
delete_version {\n delete_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?;\n }\n txn.commit().await?;\n Ok(())\n}\n\n#[derive(Debug)]\npub struct MetadataTableSetup {\n pub metadata_table_missing: bool,\n}\n\nimpl MetadataTableSetup {\n pub fn into_setup_info(self) -> ResourceSetupInfo<(), (), MetadataTableSetup> {\n ResourceSetupInfo {\n key: (),\n state: None,\n description: \"CocoIndex Metadata Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for MetadataTableSetup {\n fn describe_changes(&self) -> Vec {\n if self.metadata_table_missing {\n vec![setup::ChangeDescription::Action(format!(\n \"Create the cocoindex metadata table {SETUP_METADATA_TABLE_NAME}\"\n ))]\n } else {\n vec![]\n }\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.metadata_table_missing {\n SetupChangeType::Create\n } else {\n SetupChangeType::NoChange\n }\n }\n}\n\nimpl MetadataTableSetup {\n pub async fn apply_change(&self) -> Result<()> {\n if !self.metadata_table_missing {\n return Ok(());\n }\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n let query_str = format!(\n \"CREATE TABLE IF NOT EXISTS {SETUP_METADATA_TABLE_NAME} (\n flow_name TEXT NOT NULL,\n resource_type TEXT NOT NULL,\n key JSONB NOT NULL,\n state JSONB,\n staging_changes JSONB NOT NULL,\n\n PRIMARY KEY (flow_name, resource_type, key)\n )\n \",\n );\n sqlx::query(&query_str).execute(pool).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/llm/openrouter.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_openrouter(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"https://openrouter.ai/api/v1\".to_string());\n let api_key = std::env::var(\"OPENROUTER_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n 
config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/py/convert.rs", "use crate::prelude::*;\n\nuse bytes::Bytes;\nuse numpy::{PyArray1, PyArrayDyn, PyArrayMethods};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::exceptions::PyTypeError;\nuse pyo3::types::PyAny;\nuse pyo3::types::{PyList, PyTuple};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pythonize::{depythonize, pythonize};\nuse serde::de::DeserializeOwned;\nuse std::ops::Deref;\n\nuse super::IntoPyResult;\n\n#[derive(Debug)]\npub struct Pythonized(pub T);\n\nimpl<'py, T: DeserializeOwned> FromPyObject<'py> for Pythonized {\n fn extract_bound(obj: &Bound<'py, PyAny>) -> PyResult {\n Ok(Pythonized(depythonize(obj).into_py_result()?))\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for &Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n pythonize(py, &self.0).into_py_result()\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n (&self).into_pyobject(py)\n }\n}\n\nimpl Pythonized {\n pub fn into_inner(self) -> T {\n self.0\n }\n}\n\nimpl Deref for Pythonized {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nfn basic_value_to_py_object<'py>(\n py: Python<'py>,\n v: &value::BasicValue,\n) -> PyResult> {\n let result = match v {\n value::BasicValue::Bytes(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Str(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Bool(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Int64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float32(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Range(v) => pythonize(py, v).into_py_result()?,\n 
value::BasicValue::Uuid(uuid_val) => uuid_val.into_bound_py_any(py)?,\n value::BasicValue::Date(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Time(v) => v.into_bound_py_any(py)?,\n value::BasicValue::LocalDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::OffsetDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::TimeDelta(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Json(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Vector(v) => handle_vector_to_py(py, v)?,\n value::BasicValue::UnionVariant { tag_id, value } => {\n (*tag_id, basic_value_to_py_object(py, value)?).into_bound_py_any(py)?\n }\n };\n Ok(result)\n}\n\npub fn field_values_to_py_object<'py, 'a>(\n py: Python<'py>,\n values: impl Iterator,\n) -> PyResult> {\n let fields = values\n .map(|v| value_to_py_object(py, v))\n .collect::>>()?;\n Ok(PyTuple::new(py, fields)?.into_any())\n}\n\npub fn value_to_py_object<'py>(py: Python<'py>, v: &value::Value) -> PyResult> {\n let result = match v {\n value::Value::Null => py.None().into_bound(py),\n value::Value::Basic(v) => basic_value_to_py_object(py, v)?,\n value::Value::Struct(v) => field_values_to_py_object(py, v.fields.iter())?,\n value::Value::UTable(v) | value::Value::LTable(v) => {\n let rows = v\n .iter()\n .map(|v| field_values_to_py_object(py, v.0.fields.iter()))\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n value::Value::KTable(v) => {\n let rows = v\n .iter()\n .map(|(k, v)| {\n field_values_to_py_object(\n py,\n std::iter::once(&value::Value::from(k.clone())).chain(v.0.fields.iter()),\n )\n })\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n };\n Ok(result)\n}\n\nfn basic_value_from_py_object<'py>(\n typ: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = match typ {\n schema::BasicValueType::Bytes => {\n value::BasicValue::Bytes(Bytes::from(v.extract::>()?))\n }\n schema::BasicValueType::Str => 
value::BasicValue::Str(Arc::from(v.extract::()?)),\n schema::BasicValueType::Bool => value::BasicValue::Bool(v.extract::()?),\n schema::BasicValueType::Int64 => value::BasicValue::Int64(v.extract::()?),\n schema::BasicValueType::Float32 => value::BasicValue::Float32(v.extract::()?),\n schema::BasicValueType::Float64 => value::BasicValue::Float64(v.extract::()?),\n schema::BasicValueType::Range => value::BasicValue::Range(depythonize(v)?),\n schema::BasicValueType::Uuid => value::BasicValue::Uuid(v.extract::()?),\n schema::BasicValueType::Date => value::BasicValue::Date(v.extract::()?),\n schema::BasicValueType::Time => value::BasicValue::Time(v.extract::()?),\n schema::BasicValueType::LocalDateTime => {\n value::BasicValue::LocalDateTime(v.extract::()?)\n }\n schema::BasicValueType::OffsetDateTime => {\n if v.getattr_opt(\"tzinfo\")?\n .ok_or_else(|| {\n PyErr::new::(format!(\n \"expecting a datetime.datetime value, got {}\",\n v.get_type()\n ))\n })?\n .is_none()\n {\n value::BasicValue::OffsetDateTime(\n v.extract::()?.and_utc().into(),\n )\n } else {\n value::BasicValue::OffsetDateTime(\n v.extract::>()?,\n )\n }\n }\n schema::BasicValueType::TimeDelta => {\n value::BasicValue::TimeDelta(v.extract::()?)\n }\n schema::BasicValueType::Json => {\n value::BasicValue::Json(Arc::from(depythonize::(v)?))\n }\n schema::BasicValueType::Vector(elem) => {\n if let Some(vector) = handle_ndarray_from_py(&elem.element_type, v)? 
{\n vector\n } else {\n // Fallback to list\n value::BasicValue::Vector(Arc::from(\n v.extract::>>()?\n .into_iter()\n .map(|v| basic_value_from_py_object(&elem.element_type, &v))\n .collect::>>()?,\n ))\n }\n }\n schema::BasicValueType::Union(s) => {\n let mut valid_value = None;\n\n // Try parsing the value\n for (i, typ) in s.types.iter().enumerate() {\n if let Ok(value) = basic_value_from_py_object(typ, v) {\n valid_value = Some(value::BasicValue::UnionVariant {\n tag_id: i,\n value: Box::new(value),\n });\n break;\n }\n }\n\n valid_value.ok_or_else(|| {\n PyErr::new::(format!(\n \"invalid union value: {}, available types: {:?}\",\n v, s.types\n ))\n })?\n }\n };\n Ok(result)\n}\n\n// Helper function to convert PyAny to BasicValue for NDArray\nfn handle_ndarray_from_py<'py>(\n elem_type: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult> {\n macro_rules! try_convert {\n ($t:ty, $cast:expr) => {\n if let Ok(array) = v.downcast::>() {\n let data = array.readonly().as_slice()?.to_vec();\n let vec = data.into_iter().map($cast).collect::>();\n return Ok(Some(value::BasicValue::Vector(Arc::from(vec))));\n }\n };\n }\n\n match *elem_type {\n schema::BasicValueType::Float32 => try_convert!(f32, value::BasicValue::Float32),\n schema::BasicValueType::Float64 => try_convert!(f64, value::BasicValue::Float64),\n schema::BasicValueType::Int64 => try_convert!(i64, value::BasicValue::Int64),\n _ => {}\n }\n\n Ok(None)\n}\n\n// Helper function to convert BasicValue::Vector to PyAny\nfn handle_vector_to_py<'py>(\n py: Python<'py>,\n v: &[value::BasicValue],\n) -> PyResult> {\n match v.first() {\n Some(value::BasicValue::Float32(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float32(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float32\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Float64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n 
value::BasicValue::Float64(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Int64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Int64(i) => Ok(*i),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Int64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n _ => Ok(v\n .iter()\n .map(|v| basic_value_to_py_object(py, v))\n .collect::>>()?\n .into_bound_py_any(py)?),\n }\n}\n\nfn field_values_from_py_object<'py>(\n schema: &schema::StructSchema,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let list = v.extract::>>()?;\n if list.len() != schema.fields.len() {\n return Err(PyException::new_err(format!(\n \"struct field number mismatch, expected {}, got {}\",\n schema.fields.len(),\n list.len()\n )));\n }\n\n Ok(value::FieldValues {\n fields: schema\n .fields\n .iter()\n .zip(list.into_iter())\n .map(|(f, v)| value_from_py_object(&f.value_type.typ, &v))\n .collect::>>()?,\n })\n}\n\npub fn value_from_py_object<'py>(\n typ: &schema::ValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = if v.is_none() {\n value::Value::Null\n } else {\n match typ {\n schema::ValueType::Basic(typ) => {\n value::Value::Basic(basic_value_from_py_object(typ, v)?)\n }\n schema::ValueType::Struct(schema) => {\n value::Value::Struct(field_values_from_py_object(schema, v)?)\n }\n schema::ValueType::Table(schema) => {\n let list = v.extract::>>()?;\n let values = list\n .into_iter()\n .map(|v| field_values_from_py_object(&schema.row, &v))\n .collect::>>()?;\n\n match schema.kind {\n schema::TableKind::UTable => {\n value::Value::UTable(values.into_iter().map(|v| v.into()).collect())\n }\n schema::TableKind::LTable => {\n value::Value::LTable(values.into_iter().map(|v| v.into()).collect())\n }\n\n schema::TableKind::KTable => value::Value::KTable(\n values\n .into_iter()\n 
.map(|v| {\n let mut iter = v.fields.into_iter();\n let key = iter.next().unwrap().into_key().into_py_result()?;\n Ok((\n key,\n value::ScopeValue(value::FieldValues {\n fields: iter.collect::>(),\n }),\n ))\n })\n .collect::>>()?,\n ),\n }\n }\n }\n };\n Ok(result)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::base::schema;\n use crate::base::value;\n use crate::base::value::ScopeValue;\n use pyo3::Python;\n use std::collections::BTreeMap;\n use std::sync::Arc;\n\n fn assert_roundtrip_conversion(original_value: &value::Value, value_type: &schema::ValueType) {\n Python::with_gil(|py| {\n // Convert Rust value to Python object using value_to_py_object\n let py_object = value_to_py_object(py, original_value)\n .expect(\"Failed to convert Rust value to Python object\");\n\n println!(\"Python object: {py_object:?}\");\n let roundtripped_value = value_from_py_object(value_type, &py_object)\n .expect(\"Failed to convert Python object back to Rust value\");\n\n println!(\"Roundtripped value: {roundtripped_value:?}\");\n assert_eq!(\n original_value, &roundtripped_value,\n \"Value mismatch after roundtrip\"\n );\n });\n }\n\n #[test]\n fn test_roundtrip_basic_values() {\n let values_and_types = vec![\n (\n value::Value::Basic(value::BasicValue::Int64(42)),\n schema::ValueType::Basic(schema::BasicValueType::Int64),\n ),\n (\n value::Value::Basic(value::BasicValue::Float64(3.14)),\n schema::ValueType::Basic(schema::BasicValueType::Float64),\n ),\n (\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"hello\"))),\n schema::ValueType::Basic(schema::BasicValueType::Str),\n ),\n (\n value::Value::Basic(value::BasicValue::Bool(true)),\n schema::ValueType::Basic(schema::BasicValueType::Bool),\n ),\n ];\n\n for (val, typ) in values_and_types {\n assert_roundtrip_conversion(&val, &typ);\n }\n }\n\n #[test]\n fn test_roundtrip_struct() {\n let struct_schema = schema::StructSchema {\n description: Some(Arc::from(\"Test struct description\")),\n fields: 
Arc::new(vec![\n schema::FieldSchema {\n name: \"a\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"b\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n };\n\n let struct_val_data = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(10)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"world\"))),\n ],\n };\n\n let struct_val = value::Value::Struct(struct_val_data);\n let struct_typ = schema::ValueType::Struct(struct_schema); // No clone needed\n\n assert_roundtrip_conversion(&struct_val, &struct_typ);\n }\n\n #[test]\n fn test_roundtrip_table_types() {\n let row_schema_struct = Arc::new(schema::StructSchema {\n description: Some(Arc::from(\"Test table row description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"key_col\".to_string(), // Will be used as key for KTable implicitly\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_1\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_2\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Bool),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n });\n\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(1)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n 
value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(2)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // UTable\n let utable_schema = schema::TableSchema {\n kind: schema::TableKind::UTable,\n row: (*row_schema_struct).clone(),\n };\n let utable_val = value::Value::UTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let utable_typ = schema::ValueType::Table(utable_schema);\n assert_roundtrip_conversion(&utable_val, &utable_typ);\n\n // LTable\n let ltable_schema = schema::TableSchema {\n kind: schema::TableKind::LTable,\n row: (*row_schema_struct).clone(),\n };\n let ltable_val = value::Value::LTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let ltable_typ = schema::ValueType::Table(ltable_schema);\n assert_roundtrip_conversion(<able_val, <able_typ);\n\n // KTable\n let ktable_schema = schema::TableSchema {\n kind: schema::TableKind::KTable,\n row: (*row_schema_struct).clone(),\n };\n let mut ktable_data = BTreeMap::new();\n\n // Create KTable entries where the ScopeValue doesn't include the key field\n // This matches how the Python code will serialize/deserialize\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // For KTable, the key is 
extracted from the first field of ScopeValue based on current serialization\n let key1 = value::Value::::Basic(value::BasicValue::Int64(1))\n .into_key()\n .unwrap();\n let key2 = value::Value::::Basic(value::BasicValue::Int64(2))\n .into_key()\n .unwrap();\n\n ktable_data.insert(key1, row1_scope_val.clone());\n ktable_data.insert(key2, row2_scope_val.clone());\n\n let ktable_val = value::Value::KTable(ktable_data);\n let ktable_typ = schema::ValueType::Table(ktable_schema);\n assert_roundtrip_conversion(&ktable_val, &ktable_typ);\n }\n}\n"], ["/cocoindex/src/base/json_schema.rs", "use crate::prelude::*;\n\nuse crate::utils::immutable::RefList;\nuse schemars::schema::{\n ArrayValidation, InstanceType, ObjectValidation, Schema, SchemaObject, SingleOrVec,\n SubschemaValidation,\n};\nuse std::fmt::Write;\n\npub struct ToJsonSchemaOptions {\n /// If true, mark all fields as required.\n /// Use union type (with `null`) for optional fields instead.\n /// Models like OpenAI will reject the schema if a field is not required.\n pub fields_always_required: bool,\n\n /// If true, the JSON schema supports the `format` keyword.\n pub supports_format: bool,\n\n /// If true, extract descriptions to a separate extra instruction.\n pub extract_descriptions: bool,\n\n /// If true, the top level must be a JSON object.\n pub top_level_must_be_object: bool,\n}\n\nstruct JsonSchemaBuilder {\n options: ToJsonSchemaOptions,\n extra_instructions_per_field: IndexMap,\n}\n\nimpl JsonSchemaBuilder {\n fn new(options: ToJsonSchemaOptions) -> Self {\n Self {\n options,\n extra_instructions_per_field: IndexMap::new(),\n }\n }\n\n fn set_description(\n &mut self,\n schema: &mut SchemaObject,\n description: impl ToString,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) {\n if self.options.extract_descriptions {\n let mut fields: Vec<_> = field_path.iter().map(|f| f.as_str()).collect();\n fields.reverse();\n self.extra_instructions_per_field\n .insert(fields.join(\".\"), 
description.to_string());\n } else {\n schema.metadata.get_or_insert_default().description = Some(description.to_string());\n }\n }\n\n fn for_basic_value_type(\n &mut self,\n basic_type: &schema::BasicValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n match basic_type {\n schema::BasicValueType::Str => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bytes => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bool => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Boolean)));\n }\n schema::BasicValueType::Int64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Integer)));\n }\n schema::BasicValueType::Float32 | schema::BasicValueType::Float64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Number)));\n }\n schema::BasicValueType::Range => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(\n InstanceType::Integer,\n ))),\n ..Default::default()\n }\n .into(),\n ))),\n min_items: Some(2),\n max_items: Some(2),\n ..Default::default()\n }));\n self.set_description(\n &mut schema,\n \"A range represented by a list of two positions, start pos (inclusive), end pos (exclusive).\",\n field_path,\n );\n }\n schema::BasicValueType::Uuid => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"uuid\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A UUID, e.g. 
123e4567-e89b-12d3-a456-426614174000\",\n field_path,\n );\n }\n schema::BasicValueType::Date => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A date in YYYY-MM-DD format, e.g. 2025-03-27\",\n field_path,\n );\n }\n schema::BasicValueType::Time => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A time in HH:MM:SS format, e.g. 13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::LocalDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time without timezone offset in YYYY-MM-DDTHH:MM:SS format, e.g. 2025-03-27T13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::OffsetDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time with timezone offset in RFC3339, e.g. 2025-03-27T13:32:12Z, 2025-03-27T07:32:12.313-06:00\",\n field_path,\n );\n }\n &schema::BasicValueType::TimeDelta => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"duration\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A duration, e.g. 'PT1H2M3S' (ISO 8601) or '1 day 2 hours 3 seconds'\",\n field_path,\n );\n }\n schema::BasicValueType::Json => {\n // Can be any value. 
No type constraint.\n }\n schema::BasicValueType::Vector(s) => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_basic_value_type(&s.element_type, field_path)\n .into(),\n ))),\n min_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n max_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n ..Default::default()\n }));\n }\n schema::BasicValueType::Union(s) => {\n schema.subschemas = Some(Box::new(SubschemaValidation {\n one_of: Some(\n s.types\n .iter()\n .map(|t| Schema::Object(self.for_basic_value_type(t, field_path)))\n .collect(),\n ),\n ..Default::default()\n }));\n }\n }\n schema\n }\n\n fn for_struct_schema(\n &mut self,\n struct_schema: &schema::StructSchema,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n if let Some(description) = &struct_schema.description {\n self.set_description(&mut schema, description, field_path);\n }\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Object)));\n schema.object = Some(Box::new(ObjectValidation {\n properties: struct_schema\n .fields\n .iter()\n .map(|f| {\n let mut schema =\n self.for_enriched_value_type(&f.value_type, field_path.prepend(&f.name));\n if self.options.fields_always_required && f.value_type.nullable {\n if let Some(instance_type) = &mut schema.instance_type {\n let mut types = match instance_type {\n SingleOrVec::Single(t) => vec![**t],\n SingleOrVec::Vec(t) => std::mem::take(t),\n };\n types.push(InstanceType::Null);\n *instance_type = SingleOrVec::Vec(types);\n }\n }\n (f.name.to_string(), schema.into())\n })\n .collect(),\n required: struct_schema\n .fields\n .iter()\n .filter(|&f| (self.options.fields_always_required || !f.value_type.nullable))\n .map(|f| f.name.to_string())\n .collect(),\n additional_properties: Some(Schema::Bool(false).into()),\n 
..Default::default()\n }));\n schema\n }\n\n fn for_value_type(\n &mut self,\n value_type: &schema::ValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n match value_type {\n schema::ValueType::Basic(b) => self.for_basic_value_type(b, field_path),\n schema::ValueType::Struct(s) => self.for_struct_schema(s, field_path),\n schema::ValueType::Table(c) => SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(InstanceType::Array))),\n array: Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_struct_schema(&c.row, field_path).into(),\n ))),\n ..Default::default()\n })),\n ..Default::default()\n },\n }\n }\n\n fn for_enriched_value_type(\n &mut self,\n enriched_value_type: &schema::EnrichedValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n self.for_value_type(&enriched_value_type.typ, field_path)\n }\n\n fn build_extra_instructions(&self) -> Result> {\n if self.extra_instructions_per_field.is_empty() {\n return Ok(None);\n }\n\n let mut instructions = String::new();\n write!(&mut instructions, \"Instructions for specific fields:\\n\\n\")?;\n for (field_path, instruction) in self.extra_instructions_per_field.iter() {\n write!(\n &mut instructions,\n \"- {}: {}\\n\\n\",\n if field_path.is_empty() {\n \"(root object)\"\n } else {\n field_path.as_str()\n },\n instruction\n )?;\n }\n Ok(Some(instructions))\n }\n}\n\npub struct ValueExtractor {\n value_type: schema::ValueType,\n object_wrapper_field_name: Option,\n}\n\nimpl ValueExtractor {\n pub fn extract_value(&self, json_value: serde_json::Value) -> Result {\n let unwrapped_json_value =\n if let Some(object_wrapper_field_name) = &self.object_wrapper_field_name {\n match json_value {\n serde_json::Value::Object(mut o) => o\n .remove(object_wrapper_field_name)\n .unwrap_or(serde_json::Value::Null),\n _ => {\n bail!(\"Field `{}` not found\", object_wrapper_field_name)\n }\n }\n } else {\n json_value\n };\n let result = 
value::Value::from_json(unwrapped_json_value, &self.value_type)?;\n Ok(result)\n }\n}\n\npub struct BuildJsonSchemaOutput {\n pub schema: SchemaObject,\n pub extra_instructions: Option,\n pub value_extractor: ValueExtractor,\n}\n\npub fn build_json_schema(\n value_type: schema::EnrichedValueType,\n options: ToJsonSchemaOptions,\n) -> Result {\n let mut builder = JsonSchemaBuilder::new(options);\n let (schema, object_wrapper_field_name) = if builder.options.top_level_must_be_object\n && !matches!(value_type.typ, schema::ValueType::Struct(_))\n {\n let object_wrapper_field_name = \"value\".to_string();\n let wrapper_struct = schema::StructSchema {\n fields: Arc::new(vec![schema::FieldSchema {\n name: object_wrapper_field_name.clone(),\n value_type: value_type.clone(),\n }]),\n description: None,\n };\n (\n builder.for_struct_schema(&wrapper_struct, RefList::Nil),\n Some(object_wrapper_field_name),\n )\n } else {\n (\n builder.for_enriched_value_type(&value_type, RefList::Nil),\n None,\n )\n };\n Ok(BuildJsonSchemaOutput {\n schema,\n extra_instructions: builder.build_extra_instructions()?,\n value_extractor: ValueExtractor {\n value_type: value_type.typ,\n object_wrapper_field_name,\n },\n })\n}\n"], ["/cocoindex/src/lib_context.rs", "use crate::prelude::*;\n\nuse crate::builder::AnalyzedFlow;\nuse crate::execution::source_indexer::SourceIndexingContext;\nuse crate::service::error::ApiError;\nuse crate::settings;\nuse crate::setup::ObjectSetupStatus;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\nuse sqlx::postgres::PgConnectOptions;\nuse tokio::runtime::Runtime;\n\npub struct FlowExecutionContext {\n pub setup_execution_context: Arc,\n pub setup_status: setup::FlowSetupStatus,\n source_indexing_contexts: Vec>>,\n}\n\nasync fn build_setup_context(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result<(\n Arc,\n setup::FlowSetupStatus,\n)> {\n let setup_execution_context = 
Arc::new(exec_ctx::build_flow_setup_execution_context(\n &analyzed_flow.flow_instance,\n &analyzed_flow.data_schema,\n &analyzed_flow.setup_state,\n existing_flow_ss,\n )?);\n\n let setup_status = setup::check_flow_setup_status(\n Some(&setup_execution_context.setup_state),\n existing_flow_ss,\n &analyzed_flow.flow_instance_ctx,\n )\n .await?;\n\n Ok((setup_execution_context, setup_status))\n}\n\nimpl FlowExecutionContext {\n async fn new(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n let mut source_indexing_contexts = Vec::new();\n source_indexing_contexts.resize_with(analyzed_flow.flow_instance.import_ops.len(), || {\n tokio::sync::OnceCell::new()\n });\n\n Ok(Self {\n setup_execution_context,\n setup_status,\n source_indexing_contexts,\n })\n }\n\n pub async fn update_setup_state(\n &mut self,\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result<()> {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n self.setup_execution_context = setup_execution_context;\n self.setup_status = setup_status;\n Ok(())\n }\n\n pub async fn get_source_indexing_context(\n &self,\n flow: &Arc,\n source_idx: usize,\n pool: &PgPool,\n ) -> Result<&Arc> {\n self.source_indexing_contexts[source_idx]\n .get_or_try_init(|| async move {\n anyhow::Ok(Arc::new(\n SourceIndexingContext::load(\n flow.clone(),\n source_idx,\n self.setup_execution_context.clone(),\n pool,\n )\n .await?,\n ))\n })\n .await\n }\n}\n\npub struct FlowContext {\n pub flow: Arc,\n execution_ctx: Arc>,\n}\n\nimpl FlowContext {\n pub fn flow_name(&self) -> &str {\n &self.flow.flow_instance.name\n }\n\n pub async fn new(\n flow: Arc,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let execution_ctx = 
Arc::new(tokio::sync::RwLock::new(\n FlowExecutionContext::new(&flow, existing_flow_ss).await?,\n ));\n Ok(Self {\n flow,\n execution_ctx,\n })\n }\n\n pub async fn use_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.read().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub async fn use_owned_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.clone().read_owned().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub fn get_execution_ctx_for_setup(&self) -> &tokio::sync::RwLock {\n &self.execution_ctx\n }\n}\n\nstatic TOKIO_RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap());\nstatic AUTH_REGISTRY: LazyLock> = LazyLock::new(|| Arc::new(AuthRegistry::new()));\n\ntype PoolKey = (String, Option);\ntype PoolValue = Arc>;\n\n#[derive(Default)]\npub struct DbPools {\n pub pools: Mutex>,\n}\n\nimpl DbPools {\n pub async fn get_pool(&self, conn_spec: &settings::DatabaseConnectionSpec) -> Result {\n let db_pool_cell = {\n let key = (conn_spec.url.clone(), conn_spec.user.clone());\n let mut db_pools = self.pools.lock().unwrap();\n db_pools.entry(key).or_default().clone()\n };\n let pool = db_pool_cell\n .get_or_try_init(|| async move {\n let mut pg_options: PgConnectOptions = conn_spec.url.parse()?;\n if let Some(user) = &conn_spec.user {\n pg_options = pg_options.username(user);\n }\n if let Some(password) = &conn_spec.password {\n pg_options = pg_options.password(password);\n }\n let pool = PgPool::connect_with(pg_options)\n .await\n .context(\"Failed to connect to database\")?;\n anyhow::Ok(pool)\n })\n .await?;\n Ok(pool.clone())\n }\n}\n\npub struct 
LibSetupContext {\n pub all_setup_states: setup::AllSetupStates,\n pub global_setup_status: setup::GlobalSetupStatus,\n}\npub struct PersistenceContext {\n pub builtin_db_pool: PgPool,\n pub setup_ctx: tokio::sync::RwLock,\n}\n\npub struct LibContext {\n pub db_pools: DbPools,\n pub persistence_ctx: Option,\n pub flows: Mutex>>,\n\n pub global_concurrency_controller: Arc,\n}\n\nimpl LibContext {\n pub fn get_flow_context(&self, flow_name: &str) -> Result> {\n let flows = self.flows.lock().unwrap();\n let flow_ctx = flows\n .get(flow_name)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"Flow instance not found: {flow_name}\"),\n StatusCode::NOT_FOUND,\n )\n })?\n .clone();\n Ok(flow_ctx)\n }\n\n pub fn remove_flow_context(&self, flow_name: &str) {\n let mut flows = self.flows.lock().unwrap();\n flows.remove(flow_name);\n }\n\n pub fn require_persistence_ctx(&self) -> Result<&PersistenceContext> {\n self.persistence_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Database is required for this operation. 
Please set COCOINDEX_DATABASE_URL environment variable and call cocoindex.init() with database settings.\"))\n }\n\n pub fn require_builtin_db_pool(&self) -> Result<&PgPool> {\n Ok(&self.require_persistence_ctx()?.builtin_db_pool)\n }\n}\n\npub fn get_runtime() -> &'static Runtime {\n &TOKIO_RUNTIME\n}\n\npub fn get_auth_registry() -> &'static Arc {\n &AUTH_REGISTRY\n}\n\nstatic LIB_INIT: OnceLock<()> = OnceLock::new();\npub fn create_lib_context(settings: settings::Settings) -> Result {\n LIB_INIT.get_or_init(|| {\n let _ = env_logger::try_init();\n\n pyo3_async_runtimes::tokio::init_with_runtime(get_runtime()).unwrap();\n\n let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();\n });\n\n let db_pools = DbPools::default();\n let persistence_ctx = if let Some(database_spec) = &settings.database {\n let (pool, all_setup_states) = get_runtime().block_on(async {\n let pool = db_pools.get_pool(database_spec).await?;\n let existing_ss = setup::get_existing_setup_state(&pool).await?;\n anyhow::Ok((pool, existing_ss))\n })?;\n Some(PersistenceContext {\n builtin_db_pool: pool,\n setup_ctx: tokio::sync::RwLock::new(LibSetupContext {\n global_setup_status: setup::GlobalSetupStatus::from_setup_states(&all_setup_states),\n all_setup_states,\n }),\n })\n } else {\n // No database configured\n None\n };\n\n Ok(LibContext {\n db_pools,\n persistence_ctx,\n flows: Mutex::new(BTreeMap::new()),\n global_concurrency_controller: Arc::new(concur_control::ConcurrencyController::new(\n &concur_control::Options {\n max_inflight_rows: settings.global_execution_options.source_max_inflight_rows,\n max_inflight_bytes: settings.global_execution_options.source_max_inflight_bytes,\n },\n )),\n })\n}\n\npub static LIB_CONTEXT: RwLock>> = RwLock::new(None);\n\npub(crate) fn init_lib_context(settings: settings::Settings) -> Result<()> {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = Some(Arc::new(create_lib_context(settings)?));\n 
Ok(())\n}\n\npub(crate) fn get_lib_context() -> Result> {\n let lib_context_locked = LIB_CONTEXT.read().unwrap();\n lib_context_locked\n .as_ref()\n .cloned()\n .ok_or_else(|| anyhow!(\"CocoIndex library is not initialized or already stopped\"))\n}\n\npub(crate) fn clear_lib_context() {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = None;\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_db_pools_default() {\n let db_pools = DbPools::default();\n assert!(db_pools.pools.lock().unwrap().is_empty());\n }\n\n #[test]\n fn test_lib_context_without_database() {\n let lib_context = create_lib_context(settings::Settings::default()).unwrap();\n assert!(lib_context.persistence_ctx.is_none());\n assert!(lib_context.require_builtin_db_pool().is_err());\n }\n\n #[test]\n fn test_persistence_context_type_safety() {\n // This test ensures that PersistenceContext groups related fields together\n let settings = settings::Settings {\n database: Some(settings::DatabaseConnectionSpec {\n url: \"postgresql://test\".to_string(),\n user: None,\n password: None,\n }),\n ..Default::default()\n };\n\n // This would fail at runtime due to invalid connection, but we're testing the structure\n let result = create_lib_context(settings);\n // We expect this to fail due to invalid connection, but the structure should be correct\n assert!(result.is_err());\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/property_graph.rs", "use crate::prelude::*;\n\nuse crate::ops::sdk::{AuthEntryReference, FieldSchema};\n\n#[derive(Debug, Deserialize)]\npub struct TargetFieldMapping {\n pub source: spec::FieldName,\n\n /// Field name for the node in the Knowledge Graph.\n /// If unspecified, it's the same as `field_name`.\n #[serde(default)]\n pub target: Option,\n}\n\nimpl TargetFieldMapping {\n pub fn get_target(&self) -> &spec::FieldName {\n self.target.as_ref().unwrap_or(&self.source)\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct 
NodeFromFieldsSpec {\n pub label: String,\n pub fields: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodesSpec {\n pub label: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct RelationshipsSpec {\n pub rel_type: String,\n pub source: NodeFromFieldsSpec,\n pub target: NodeFromFieldsSpec,\n}\n\n#[derive(Debug, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum GraphElementMapping {\n Relationship(RelationshipsSpec),\n Node(NodesSpec),\n}\n\n#[derive(Debug, Deserialize)]\npub struct GraphDeclaration {\n pub nodes_label: String,\n\n #[serde(flatten)]\n pub index_options: spec::IndexOptions,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)]\npub enum ElementType {\n Node(String),\n Relationship(String),\n}\n\nimpl ElementType {\n pub fn label(&self) -> &str {\n match self {\n ElementType::Node(label) => label,\n ElementType::Relationship(label) => label,\n }\n }\n\n pub fn from_mapping_spec(spec: &GraphElementMapping) -> Self {\n match spec {\n GraphElementMapping::Relationship(spec) => {\n ElementType::Relationship(spec.rel_type.clone())\n }\n GraphElementMapping::Node(spec) => ElementType::Node(spec.label.clone()),\n }\n }\n\n pub fn matcher(&self, var_name: &str) -> String {\n match self {\n ElementType::Relationship(label) => format!(\"()-[{var_name}:{label}]->()\"),\n ElementType::Node(label) => format!(\"({var_name}:{label})\"),\n }\n }\n}\n\nimpl std::fmt::Display for ElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ElementType::Node(label) => write!(f, \"Node(label:{label})\"),\n ElementType::Relationship(rel_type) => write!(f, \"Relationship(type:{rel_type})\"),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Derivative)]\n#[derivative(\n Clone(bound = \"\"),\n PartialEq(bound = \"\"),\n Eq(bound = \"\"),\n Hash(bound = \"\")\n)]\npub struct GraphElementType {\n #[serde(bound = \"\")]\n pub connection: AuthEntryReference,\n pub typ: ElementType,\n}\n\nimpl 
std::fmt::Display for GraphElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}/{}\", self.connection.key, self.typ)\n }\n}\n\npub struct GraphElementSchema {\n pub elem_type: ElementType,\n pub key_fields: Vec,\n pub value_fields: Vec,\n}\n\npub struct GraphElementInputFieldsIdx {\n pub key: Vec,\n pub value: Vec,\n}\n\nimpl GraphElementInputFieldsIdx {\n pub fn extract_key(&self, fields: &[value::Value]) -> Result {\n value::KeyValue::from_values(self.key.iter().map(|idx| &fields[*idx]))\n }\n}\n\npub struct AnalyzedGraphElementFieldMapping {\n pub schema: Arc,\n pub fields_input_idx: GraphElementInputFieldsIdx,\n}\n\nimpl AnalyzedGraphElementFieldMapping {\n pub fn has_value_fields(&self) -> bool {\n !self.fields_input_idx.value.is_empty()\n }\n}\n\npub struct AnalyzedRelationshipInfo {\n pub source: AnalyzedGraphElementFieldMapping,\n pub target: AnalyzedGraphElementFieldMapping,\n}\n\npub struct AnalyzedDataCollection {\n pub schema: Arc,\n pub value_fields_input_idx: Vec,\n\n pub rel: Option,\n}\n\nimpl AnalyzedDataCollection {\n pub fn dependent_node_labels(&self) -> IndexSet<&str> {\n let mut dependent_node_labels = IndexSet::new();\n if let Some(rel) = &self.rel {\n dependent_node_labels.insert(rel.source.schema.elem_type.label());\n dependent_node_labels.insert(rel.target.schema.elem_type.label());\n }\n dependent_node_labels\n }\n}\n\nstruct GraphElementSchemaBuilder {\n elem_type: ElementType,\n key_fields: Vec,\n value_fields: Vec,\n}\n\nimpl GraphElementSchemaBuilder {\n fn new(elem_type: ElementType) -> Self {\n Self {\n elem_type,\n key_fields: vec![],\n value_fields: vec![],\n }\n }\n\n fn merge_fields(\n elem_type: &ElementType,\n kind: &str,\n existing_fields: &mut Vec,\n fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result> {\n if fields.is_empty() {\n return Ok(vec![]);\n }\n let result: Vec = if existing_fields.is_empty() {\n let fields_idx: Vec = fields.iter().map(|(idx, _)| 
*idx).collect();\n existing_fields.extend(fields.into_iter().map(|(_, f)| f));\n fields_idx\n } else {\n if existing_fields.len() != fields.len() {\n bail!(\n \"{elem_type} {kind} fields number mismatch: {} vs {}\",\n existing_fields.len(),\n fields.len()\n );\n }\n let mut fields_map: HashMap<_, _> = fields\n .into_iter()\n .map(|(idx, schema)| (schema.name, (idx, schema.value_type)))\n .collect();\n // Follow the order of existing fields\n existing_fields\n .iter()\n .map(|existing_field| {\n let (idx, typ) = fields_map.remove(&existing_field.name).ok_or_else(|| {\n anyhow!(\n \"{elem_type} {kind} field `{}` not found in some collector\",\n existing_field.name\n )\n })?;\n if typ != existing_field.value_type {\n bail!(\n \"{elem_type} {kind} field `{}` type mismatch: {} vs {}\",\n existing_field.name,\n typ,\n existing_field.value_type\n )\n }\n Ok(idx)\n })\n .collect::>>()?\n };\n Ok(result)\n }\n\n fn merge(\n &mut self,\n key_fields: Vec<(usize, schema::FieldSchema)>,\n value_fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result {\n let key_fields_idx =\n Self::merge_fields(&self.elem_type, \"key\", &mut self.key_fields, key_fields)?;\n let value_fields_idx = Self::merge_fields(\n &self.elem_type,\n \"value\",\n &mut self.value_fields,\n value_fields,\n )?;\n Ok(GraphElementInputFieldsIdx {\n key: key_fields_idx,\n value: value_fields_idx,\n })\n }\n\n fn build_schema(self) -> Result {\n if self.key_fields.is_empty() {\n bail!(\n \"No key fields specified for Node label `{}`\",\n self.elem_type\n );\n }\n Ok(GraphElementSchema {\n elem_type: self.elem_type,\n key_fields: self.key_fields,\n value_fields: self.value_fields,\n })\n }\n}\nstruct DependentNodeLabelAnalyzer<'a, AuthEntry> {\n graph_elem_type: GraphElementType,\n fields: IndexMap,\n remaining_fields: HashMap<&'a str, &'a TargetFieldMapping>,\n primary_key_fields: &'a [String],\n}\n\nimpl<'a, AuthEntry> DependentNodeLabelAnalyzer<'a, AuthEntry> {\n fn new(\n conn: &'a spec::AuthEntryReference,\n 
rel_end_spec: &'a NodeFromFieldsSpec,\n primary_key_fields_map: &'a HashMap<&'a GraphElementType, &'a [String]>,\n ) -> Result {\n let graph_elem_type = GraphElementType {\n connection: conn.clone(),\n typ: ElementType::Node(rel_end_spec.label.clone()),\n };\n let primary_key_fields = primary_key_fields_map\n .get(&graph_elem_type)\n .ok_or_else(invariance_violation)?;\n Ok(Self {\n graph_elem_type,\n fields: IndexMap::new(),\n remaining_fields: rel_end_spec\n .fields\n .iter()\n .map(|f| (f.source.as_str(), f))\n .collect(),\n primary_key_fields,\n })\n }\n\n fn process_field(&mut self, field_idx: usize, field_schema: &schema::FieldSchema) -> bool {\n let field_mapping = match self.remaining_fields.remove(field_schema.name.as_str()) {\n Some(field_mapping) => field_mapping,\n None => return false,\n };\n self.fields.insert(\n field_mapping.get_target().clone(),\n (field_idx, field_schema.value_type.clone()),\n );\n true\n }\n\n fn build(\n self,\n schema_builders: &mut HashMap, GraphElementSchemaBuilder>,\n ) -> Result<(GraphElementType, GraphElementInputFieldsIdx)> {\n if !self.remaining_fields.is_empty() {\n anyhow::bail!(\n \"Fields not mapped for {}: {}\",\n self.graph_elem_type,\n self.remaining_fields.keys().join(\", \")\n );\n }\n\n let (mut key_fields, value_fields): (Vec<_>, Vec<_>) = self\n .fields\n .into_iter()\n .map(|(field_name, (idx, typ))| (idx, FieldSchema::new(field_name, typ)))\n .partition(|(_, f)| self.primary_key_fields.contains(&f.name));\n if key_fields.len() != self.primary_key_fields.len() {\n bail!(\n \"Primary key fields number mismatch: {} vs {}\",\n key_fields.iter().map(|(_, f)| &f.name).join(\", \"),\n self.primary_key_fields.iter().join(\", \")\n );\n }\n key_fields.sort_by_key(|(_, f)| {\n self.primary_key_fields\n .iter()\n .position(|k| k == &f.name)\n .unwrap()\n });\n\n let fields_idx = schema_builders\n .entry(self.graph_elem_type.clone())\n .or_insert_with(|| 
GraphElementSchemaBuilder::new(self.graph_elem_type.typ.clone()))\n .merge(key_fields, value_fields)?;\n Ok((self.graph_elem_type, fields_idx))\n }\n}\n\npub struct DataCollectionGraphMappingInput<'a, AuthEntry> {\n pub auth_ref: &'a spec::AuthEntryReference,\n pub mapping: &'a GraphElementMapping,\n pub index_options: &'a spec::IndexOptions,\n\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n}\n\npub fn analyze_graph_mappings<'a, AuthEntry: 'a>(\n data_coll_inputs: impl Iterator>,\n declarations: impl Iterator<\n Item = (\n &'a spec::AuthEntryReference,\n &'a GraphDeclaration,\n ),\n >,\n) -> Result<(Vec, Vec>)> {\n let data_coll_inputs: Vec<_> = data_coll_inputs.collect();\n let decls: Vec<_> = declarations.collect();\n\n // 1a. Prepare graph element types\n let graph_elem_types = data_coll_inputs\n .iter()\n .map(|d| GraphElementType {\n connection: d.auth_ref.clone(),\n typ: ElementType::from_mapping_spec(d.mapping),\n })\n .collect::>();\n let decl_graph_elem_types = decls\n .iter()\n .map(|(auth_ref, decl)| GraphElementType {\n connection: (*auth_ref).clone(),\n typ: ElementType::Node(decl.nodes_label.clone()),\n })\n .collect::>();\n\n // 1b. Prepare primary key fields map\n let primary_key_fields_map: HashMap<&GraphElementType, &[spec::FieldName]> =\n std::iter::zip(data_coll_inputs.iter(), graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| {\n (\n graph_elem_type,\n data_coll_input.index_options.primary_key_fields(),\n )\n })\n .chain(\n std::iter::zip(decl_graph_elem_types.iter(), decls.iter()).map(\n |(graph_elem_type, (_, decl))| {\n (graph_elem_type, decl.index_options.primary_key_fields())\n },\n ),\n )\n .map(|(graph_elem_type, primary_key_fields)| {\n Ok((\n graph_elem_type,\n primary_key_fields.with_context(|| {\n format!(\"Primary key fields are not set for {graph_elem_type}\")\n })?,\n ))\n })\n .collect::>()?;\n\n // 2. 
Analyze data collection graph mappings and build target schema\n let mut node_schema_builders =\n HashMap::, GraphElementSchemaBuilder>::new();\n struct RelationshipProcessedInfo {\n rel_schema: GraphElementSchema,\n source_typ: GraphElementType,\n source_fields_idx: GraphElementInputFieldsIdx,\n target_typ: GraphElementType,\n target_fields_idx: GraphElementInputFieldsIdx,\n }\n struct DataCollectionProcessedInfo {\n value_input_fields_idx: Vec,\n rel_specific: Option>,\n }\n let data_collection_processed_info = std::iter::zip(data_coll_inputs, graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| -> Result<_> {\n let processed_info = match data_coll_input.mapping {\n GraphElementMapping::Node(_) => {\n let input_fields_idx = node_schema_builders\n .entry(graph_elem_type.clone())\n .or_insert_with_key(|graph_elem| {\n GraphElementSchemaBuilder::new(graph_elem.typ.clone())\n })\n .merge(\n data_coll_input\n .key_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n data_coll_input\n .value_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n )?;\n\n if !(0..input_fields_idx.key.len()).eq(input_fields_idx.key.into_iter()) {\n return Err(invariance_violation());\n }\n DataCollectionProcessedInfo {\n value_input_fields_idx: input_fields_idx.value,\n rel_specific: None,\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let mut src_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.source,\n &primary_key_fields_map,\n )?;\n let mut tgt_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.target,\n &primary_key_fields_map,\n )?;\n\n let mut value_fields_schema = vec![];\n let mut value_input_fields_idx = vec![];\n for (field_idx, field_schema) in\n data_coll_input.value_fields_schema.into_iter().enumerate()\n {\n if !src_analyzer.process_field(field_idx, &field_schema)\n && !tgt_analyzer.process_field(field_idx, &field_schema)\n {\n 
value_fields_schema.push(field_schema.clone());\n value_input_fields_idx.push(field_idx);\n }\n }\n\n let rel_schema = GraphElementSchema {\n elem_type: graph_elem_type.typ.clone(),\n key_fields: data_coll_input.key_fields_schema,\n value_fields: value_fields_schema,\n };\n let (source_typ, source_fields_idx) =\n src_analyzer.build(&mut node_schema_builders)?;\n let (target_typ, target_fields_idx) =\n tgt_analyzer.build(&mut node_schema_builders)?;\n DataCollectionProcessedInfo {\n value_input_fields_idx,\n rel_specific: Some(RelationshipProcessedInfo {\n rel_schema,\n source_typ,\n source_fields_idx,\n target_typ,\n target_fields_idx,\n }),\n }\n }\n };\n Ok(processed_info)\n })\n .collect::>>()?;\n\n let node_schemas: HashMap, Arc> =\n node_schema_builders\n .into_iter()\n .map(|(graph_elem_type, schema_builder)| {\n Ok((graph_elem_type, Arc::new(schema_builder.build_schema()?)))\n })\n .collect::>()?;\n\n // 3. Build output\n let analyzed_data_colls: Vec =\n std::iter::zip(data_collection_processed_info, graph_elem_types.iter())\n .map(|(processed_info, graph_elem_type)| {\n let result = match processed_info.rel_specific {\n // Node\n None => AnalyzedDataCollection {\n schema: node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone(),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: None,\n },\n // Relationship\n Some(rel_info) => AnalyzedDataCollection {\n schema: Arc::new(rel_info.rel_schema),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: Some(AnalyzedRelationshipInfo {\n source: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.source_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.source_fields_idx,\n },\n target: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.target_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.target_fields_idx,\n },\n }),\n },\n };\n 
Ok(result)\n })\n .collect::>()?;\n let decl_schemas: Vec> = decl_graph_elem_types\n .iter()\n .map(|graph_elem_type| {\n Ok(node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone())\n })\n .collect::>()?;\n Ok((analyzed_data_colls, decl_schemas))\n}\n"], ["/cocoindex/src/builder/flow_builder.rs", "use crate::{prelude::*, py::Pythonized};\n\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::{collections::btree_map, ops::Deref};\nuse tokio::task::LocalSet;\n\nuse super::analyzer::{\n AnalyzerContext, CollectorBuilder, DataScopeBuilder, OpScope, build_flow_instance_context,\n};\nuse crate::{\n base::{\n schema::{CollectorSchema, FieldSchema},\n spec::{FieldName, NamedSpec},\n },\n lib_context::LibContext,\n ops::interface::FlowInstanceContext,\n py::IntoPyResult,\n};\nuse crate::{lib_context::FlowContext, py};\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct OpScopeRef(Arc);\n\nimpl From> for OpScopeRef {\n fn from(scope: Arc) -> Self {\n Self(scope)\n }\n}\n\nimpl Deref for OpScopeRef {\n type Target = Arc;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl std::fmt::Display for OpScopeRef {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}\n\n#[pymethods]\nimpl OpScopeRef {\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn add_collector(&mut self, name: String) -> PyResult {\n let collector = DataCollector {\n name,\n scope: self.0.clone(),\n collector: Mutex::new(None),\n };\n Ok(collector)\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataType {\n schema: schema::EnrichedValueType,\n}\n\nimpl From for DataType {\n fn from(schema: schema::EnrichedValueType) -> Self {\n Self { schema }\n }\n}\n\n#[pymethods]\nimpl DataType {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.schema)\n }\n\n pub fn 
__repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn schema(&self) -> Pythonized {\n Pythonized(self.schema.clone())\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataSlice {\n scope: Arc,\n value: Arc,\n data_type: DataType,\n}\n\n#[pymethods]\nimpl DataSlice {\n pub fn data_type(&self) -> DataType {\n self.data_type.clone()\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn field(&self, field_name: &str) -> PyResult> {\n let field_schema = match &self.data_type.schema.typ {\n schema::ValueType::Struct(struct_type) => {\n match struct_type.fields.iter().find(|f| f.name == field_name) {\n Some(field) => field,\n None => return Ok(None),\n }\n }\n _ => return Err(PyException::new_err(\"expect struct type\")),\n };\n let value_mapping = match self.value.as_ref() {\n spec::ValueMapping::Field(spec::FieldMapping {\n scope,\n field_path: spec::FieldPath(field_path),\n }) => spec::ValueMapping::Field(spec::FieldMapping {\n scope: scope.clone(),\n field_path: spec::FieldPath(\n field_path\n .iter()\n .cloned()\n .chain([field_name.to_string()])\n .collect(),\n ),\n }),\n\n spec::ValueMapping::Struct(v) => v\n .fields\n .iter()\n .find(|f| f.name == field_name)\n .map(|f| f.spec.clone())\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?,\n\n spec::ValueMapping::Constant { .. 
} => {\n return Err(PyException::new_err(\n \"field access not supported for literal\",\n ));\n }\n };\n Ok(Some(DataSlice {\n scope: self.scope.clone(),\n value: Arc::new(value_mapping),\n data_type: field_schema.value_type.clone().into(),\n }))\n }\n}\n\nimpl DataSlice {\n fn extract_value_mapping(&self) -> spec::ValueMapping {\n match self.value.as_ref() {\n spec::ValueMapping::Field(v) => spec::ValueMapping::Field(spec::FieldMapping {\n field_path: v.field_path.clone(),\n scope: v.scope.clone().or_else(|| Some(self.scope.name.clone())),\n }),\n v => v.clone(),\n }\n }\n}\n\nimpl std::fmt::Display for DataSlice {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(\n f,\n \"DataSlice({}; {} {}) \",\n self.data_type.schema, self.scope, self.value\n )?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct DataCollector {\n name: String,\n scope: Arc,\n collector: Mutex>,\n}\n\n#[pymethods]\nimpl DataCollector {\n fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for DataCollector {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let collector = self.collector.lock().unwrap();\n write!(f, \"DataCollector \\\"{}\\\" ({}\", self.name, self.scope)?;\n if let Some(collector) = collector.as_ref() {\n write!(f, \": {}\", collector.schema)?;\n if collector.is_used {\n write!(f, \" (used)\")?;\n }\n }\n write!(f, \")\")?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct FlowBuilder {\n lib_context: Arc,\n flow_inst_context: Arc,\n\n root_op_scope: Arc,\n flow_instance_name: String,\n reactive_ops: Vec>,\n\n direct_input_fields: Vec,\n direct_output_value: Option,\n\n import_ops: Vec>,\n export_ops: Vec>,\n\n declarations: Vec,\n\n next_generated_op_id: usize,\n}\n\n#[pymethods]\nimpl FlowBuilder {\n #[new]\n pub fn new(name: &str) -> PyResult {\n let lib_context = get_lib_context().into_py_result()?;\n let root_op_scope = OpScope::new(\n 
spec::ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(DataScopeBuilder::new())),\n );\n let flow_inst_context = build_flow_instance_context(name, None);\n let result = Self {\n lib_context,\n flow_inst_context,\n root_op_scope,\n flow_instance_name: name.to_string(),\n\n reactive_ops: vec![],\n\n import_ops: vec![],\n export_ops: vec![],\n\n direct_input_fields: vec![],\n direct_output_value: None,\n\n declarations: vec![],\n\n next_generated_op_id: 0,\n };\n Ok(result)\n }\n\n pub fn root_scope(&self) -> OpScopeRef {\n OpScopeRef(self.root_op_scope.clone())\n }\n\n #[pyo3(signature = (kind, op_spec, target_scope, name, refresh_options=None, execution_options=None))]\n #[allow(clippy::too_many_arguments)]\n pub fn add_source(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n target_scope: Option,\n name: String,\n refresh_options: Option>,\n execution_options: Option>,\n ) -> PyResult {\n if let Some(target_scope) = target_scope {\n if *target_scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"source can only be added to the root scope\",\n ));\n }\n }\n let import_op = spec::NamedSpec {\n name,\n spec: spec::ImportOpSpec {\n source: spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n },\n refresh_options: refresh_options.map(|o| o.into_inner()).unwrap_or_default(),\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n },\n };\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(\n analyzer_ctx.analyze_import_op(&self.root_op_scope, import_op.clone()),\n )\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.import_ops.push(import_op);\n Ok(result)\n }\n\n pub fn constant(\n &self,\n value_type: py::Pythonized,\n value: Bound<'_, 
PyAny>,\n ) -> PyResult {\n let schema = value_type.into_inner();\n let value = py::value_from_py_object(&schema.typ, &value)?;\n let slice = DataSlice {\n scope: self.root_op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Constant(spec::ConstantMapping {\n schema: schema.clone(),\n value: serde_json::to_value(value).into_py_result()?,\n })),\n data_type: schema.into(),\n };\n Ok(slice)\n }\n\n pub fn add_direct_input(\n &mut self,\n name: String,\n value_type: py::Pythonized,\n ) -> PyResult {\n let value_type = value_type.into_inner();\n {\n let mut root_data_scope = self.root_op_scope.data.lock().unwrap();\n root_data_scope\n .add_field(name.clone(), &value_type)\n .into_py_result()?;\n }\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.direct_input_fields\n .push(FieldSchema { name, value_type });\n Ok(result)\n }\n\n pub fn set_direct_output(&mut self, data_slice: DataSlice) -> PyResult<()> {\n if data_slice.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"direct output must be value in the root scope\",\n ));\n }\n self.direct_output_value = Some(data_slice.extract_value_mapping());\n Ok(())\n }\n\n #[pyo3(signature = (data_slice, execution_options=None))]\n pub fn for_each(\n &mut self,\n data_slice: DataSlice,\n execution_options: Option>,\n ) -> PyResult {\n let parent_scope = &data_slice.scope;\n let field_path = match data_slice.value.as_ref() {\n spec::ValueMapping::Field(v) => &v.field_path,\n _ => return Err(PyException::new_err(\"expect field path\")),\n };\n let num_parent_layers = parent_scope.ancestors().count();\n let scope_name = format!(\n \"{}_{}\",\n field_path.last().map_or(\"\", |s| s.as_str()),\n num_parent_layers\n );\n let (_, child_op_scope) = parent_scope\n .new_foreach_op_scope(scope_name.clone(), field_path)\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name: format!(\".for_each.{}\", self.next_generated_op_id),\n spec: 
spec::ReactiveOpSpec::ForEach(spec::ForEachOpSpec {\n field_path: field_path.clone(),\n op_scope: spec::ReactiveOpScope {\n name: scope_name,\n ops: vec![],\n },\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n }),\n };\n self.next_generated_op_id += 1;\n self.get_mut_reactive_ops(parent_scope)\n .into_py_result()?\n .push(reactive_op);\n\n Ok(OpScopeRef(child_op_scope))\n }\n\n #[pyo3(signature = (kind, op_spec, args, target_scope, name))]\n pub fn transform(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n args: Vec<(DataSlice, Option)>,\n target_scope: Option,\n name: String,\n ) -> PyResult {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n let op_scope = Self::minimum_common_scope(\n args.iter().map(|(ds, _)| &ds.scope),\n target_scope.as_ref().map(|s| &s.0),\n )\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Transform(spec::TransformOpSpec {\n inputs: args\n .iter()\n .map(|(ds, arg_name)| spec::OpArgBinding {\n arg_name: spec::OpArgName(arg_name.clone()),\n value: ds.extract_value_mapping(),\n })\n .collect(),\n op: spec,\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(op_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(op_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let result = Self::last_field_to_data_slice(op_scope).into_py_result()?;\n Ok(result)\n }\n\n #[pyo3(signature = (collector, fields, auto_uuid_field=None))]\n pub fn collect(\n &mut self,\n py: Python<'_>,\n collector: &DataCollector,\n fields: Vec<(FieldName, DataSlice)>,\n auto_uuid_field: Option,\n ) -> PyResult<()> {\n let common_scope = Self::minimum_common_scope(fields.iter().map(|(_, ds)| 
&ds.scope), None)\n .into_py_result()?;\n let name = format!(\".collect.{}\", self.next_generated_op_id);\n self.next_generated_op_id += 1;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Collect(spec::CollectOpSpec {\n input: spec::StructMapping {\n fields: fields\n .iter()\n .map(|(name, ds)| NamedSpec {\n name: name.clone(),\n spec: ds.extract_value_mapping(),\n })\n .collect(),\n },\n scope_name: collector.scope.name.clone(),\n collector_name: collector.name.clone(),\n auto_uuid_field: auto_uuid_field.clone(),\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(common_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(common_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let collector_schema = CollectorSchema::from_fields(\n fields\n .into_iter()\n .map(|(name, ds)| FieldSchema {\n name,\n value_type: ds.data_type.schema,\n })\n .collect(),\n auto_uuid_field,\n );\n {\n let mut collector = collector.collector.lock().unwrap();\n if let Some(collector) = collector.as_mut() {\n collector.merge_schema(&collector_schema).into_py_result()?;\n } else {\n *collector = Some(CollectorBuilder::new(Arc::new(collector_schema)));\n }\n }\n\n Ok(())\n }\n\n #[pyo3(signature = (name, kind, op_spec, index_options, input, setup_by_user=false))]\n pub fn export(\n &mut self,\n name: String,\n kind: String,\n op_spec: py::Pythonized>,\n index_options: py::Pythonized,\n input: &DataCollector,\n setup_by_user: bool,\n ) -> PyResult<()> {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n\n if input.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"Export can only work on collectors belonging to the root scope.\",\n ));\n }\n self.export_ops.push(spec::NamedSpec {\n 
name,\n spec: spec::ExportOpSpec {\n collector_name: input.name.clone(),\n target: spec,\n index_options: index_options.into_inner(),\n setup_by_user,\n },\n });\n Ok(())\n }\n\n pub fn declare(&mut self, op_spec: py::Pythonized) -> PyResult<()> {\n self.declarations.push(op_spec.into_inner());\n Ok(())\n }\n\n pub fn scope_field(&self, scope: OpScopeRef, field_name: &str) -> PyResult> {\n let field_type = {\n let scope_builder = scope.0.data.lock().unwrap();\n let (_, field_schema) = scope_builder\n .data\n .find_field(field_name)\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?;\n schema::EnrichedValueType::from_alternative(&field_schema.value_type)\n .into_py_result()?\n };\n Ok(Some(DataSlice {\n scope: scope.0,\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![field_name.to_string()]),\n })),\n data_type: DataType { schema: field_type },\n }))\n }\n\n pub fn build_flow(&self, py: Python<'_>, py_event_loop: Py) -> PyResult {\n let spec = spec::FlowInstanceSpec {\n name: self.flow_instance_name.clone(),\n import_ops: self.import_ops.clone(),\n reactive_ops: self.reactive_ops.clone(),\n export_ops: self.export_ops.clone(),\n declarations: self.declarations.clone(),\n };\n let flow_instance_ctx = build_flow_instance_context(\n &self.flow_instance_name,\n Some(crate::py::PythonExecutionContext::new(py, py_event_loop)),\n );\n let flow_ctx = py\n .allow_threads(|| {\n get_runtime().block_on(async move {\n let analyzed_flow =\n super::AnalyzedFlow::from_flow_instance(spec, flow_instance_ctx).await?;\n let persistence_ctx = self.lib_context.require_persistence_ctx()?;\n let execution_ctx = {\n let flow_setup_ctx = persistence_ctx.setup_ctx.read().await;\n FlowContext::new(\n Arc::new(analyzed_flow),\n flow_setup_ctx\n .all_setup_states\n .flows\n .get(&self.flow_instance_name),\n )\n .await?\n };\n anyhow::Ok(execution_ctx)\n })\n })\n .into_py_result()?;\n let mut 
flow_ctxs = self.lib_context.flows.lock().unwrap();\n let flow_ctx = match flow_ctxs.entry(self.flow_instance_name.clone()) {\n btree_map::Entry::Occupied(_) => {\n return Err(PyException::new_err(format!(\n \"flow instance name already exists: {}\",\n self.flow_instance_name\n )));\n }\n btree_map::Entry::Vacant(entry) => {\n let flow_ctx = Arc::new(flow_ctx);\n entry.insert(flow_ctx.clone());\n flow_ctx\n }\n };\n Ok(py::Flow(flow_ctx))\n }\n\n pub fn build_transient_flow_async<'py>(\n &self,\n py: Python<'py>,\n py_event_loop: Py,\n ) -> PyResult> {\n if self.direct_input_fields.is_empty() {\n return Err(PyException::new_err(\"expect at least one direct input\"));\n }\n let direct_output_value = if let Some(direct_output_value) = &self.direct_output_value {\n direct_output_value\n } else {\n return Err(PyException::new_err(\"expect direct output\"));\n };\n let spec = spec::TransientFlowSpec {\n name: self.flow_instance_name.clone(),\n input_fields: self.direct_input_fields.clone(),\n reactive_ops: self.reactive_ops.clone(),\n output_value: direct_output_value.clone(),\n };\n let py_ctx = crate::py::PythonExecutionContext::new(py, py_event_loop);\n\n let analyzed_flow = get_runtime().spawn_blocking(|| {\n let local_set = LocalSet::new();\n local_set.block_on(\n get_runtime(),\n super::AnalyzedTransientFlow::from_transient_flow(spec, Some(py_ctx)),\n )\n });\n future_into_py(py, async move {\n Ok(py::TransientFlow(Arc::new(\n analyzed_flow.await.into_py_result()?.into_py_result()?,\n )))\n })\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for FlowBuilder {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Flow instance name: {}\\n\\n\", self.flow_instance_name)?;\n for op in self.import_ops.iter() {\n write!(\n f,\n \"Source op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n 
}\n for field in self.direct_input_fields.iter() {\n writeln!(f, \"Direct input {}: {}\", field.name, field.value_type)?;\n }\n if !self.direct_input_fields.is_empty() {\n writeln!(f)?;\n }\n for op in self.reactive_ops.iter() {\n write!(\n f,\n \"Reactive op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for op in self.export_ops.iter() {\n write!(\n f,\n \"Export op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n if let Some(output) = &self.direct_output_value {\n write!(f, \"Direct output: {output}\\n\\n\")?;\n }\n Ok(())\n }\n}\n\nimpl FlowBuilder {\n fn last_field_to_data_slice(op_scope: &Arc) -> Result {\n let data_scope = op_scope.data.lock().unwrap();\n let last_field = data_scope.last_field().unwrap();\n let result = DataSlice {\n scope: op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![last_field.name.clone()]),\n })),\n data_type: schema::EnrichedValueType::from_alternative(&last_field.value_type)?.into(),\n };\n Ok(result)\n }\n\n fn minimum_common_scope<'a>(\n scopes: impl Iterator>,\n target_scope: Option<&'a Arc>,\n ) -> Result<&'a Arc> {\n let mut scope_iter = scopes;\n let mut common_scope = scope_iter\n .next()\n .ok_or_else(|| PyException::new_err(\"expect at least one input\"))?;\n for scope in scope_iter {\n if scope.is_op_scope_descendant(common_scope) {\n common_scope = scope;\n } else if !common_scope.is_op_scope_descendant(scope) {\n api_bail!(\n \"expect all arguments share the common scope, got {} and {} exclusive to each other\",\n common_scope,\n scope\n );\n }\n }\n if let Some(target_scope) = target_scope {\n if !target_scope.is_op_scope_descendant(common_scope) {\n api_bail!(\n \"the field can only be attached to a scope or sub-scope of the input value. 
Target scope: {}, input scope: {}\",\n target_scope,\n common_scope\n );\n }\n common_scope = target_scope;\n }\n Ok(common_scope)\n }\n\n fn get_mut_reactive_ops<'a>(\n &'a mut self,\n op_scope: &OpScope,\n ) -> Result<&'a mut Vec>> {\n Self::get_mut_reactive_ops_internal(op_scope, &mut self.reactive_ops)\n }\n\n fn get_mut_reactive_ops_internal<'a>(\n op_scope: &OpScope,\n root_reactive_ops: &'a mut Vec>,\n ) -> Result<&'a mut Vec>> {\n let result = match &op_scope.parent {\n None => root_reactive_ops,\n Some((parent_op_scope, field_path)) => {\n let parent_reactive_ops =\n Self::get_mut_reactive_ops_internal(parent_op_scope, root_reactive_ops)?;\n // Reuse the last foreach if matched, otherwise create a new one.\n match parent_reactive_ops.last() {\n Some(spec::NamedSpec {\n spec: spec::ReactiveOpSpec::ForEach(foreach_spec),\n ..\n }) if &foreach_spec.field_path == field_path\n && foreach_spec.op_scope.name == op_scope.name => {}\n\n _ => {\n api_bail!(\"already out of op scope `{}`\", op_scope.name);\n }\n }\n match &mut parent_reactive_ops.last_mut().unwrap().spec {\n spec::ReactiveOpSpec::ForEach(foreach_spec) => &mut foreach_spec.op_scope.ops,\n _ => unreachable!(),\n }\n }\n };\n Ok(result)\n }\n}\n"], ["/cocoindex/src/utils/fingerprint.rs", "use anyhow::bail;\nuse base64::prelude::*;\nuse blake2::digest::typenum;\nuse blake2::{Blake2b, Digest};\nuse serde::Deserialize;\nuse serde::ser::{\n Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,\n SerializeTupleStruct, SerializeTupleVariant, Serializer,\n};\n\n#[derive(Debug)]\npub struct FingerprinterError {\n msg: String,\n}\n\nimpl std::fmt::Display for FingerprinterError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"FingerprinterError: {}\", self.msg)\n }\n}\nimpl std::error::Error for FingerprinterError {}\nimpl serde::ser::Error for FingerprinterError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n 
FingerprinterError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct Fingerprint(pub [u8; 16]);\n\nimpl Fingerprint {\n pub fn to_base64(self) -> String {\n BASE64_STANDARD.encode(self.0)\n }\n\n pub fn from_base64(s: &str) -> anyhow::Result {\n let bytes = match s.len() {\n 24 => BASE64_STANDARD.decode(s)?,\n\n // For backward compatibility. Some old version (<= v0.1.2) is using hex encoding.\n 32 => hex::decode(s)?,\n _ => bail!(\"Encoded fingerprint length is unexpected: {}\", s.len()),\n };\n match bytes.try_into() {\n Ok(bytes) => Ok(Fingerprint(bytes)),\n Err(e) => bail!(\"Fingerprint bytes length is unexpected: {}\", e.len()),\n }\n }\n}\n\nimpl Serialize for Fingerprint {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n serializer.serialize_str(&self.to_base64())\n }\n}\n\nimpl<'de> Deserialize<'de> for Fingerprint {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s = String::deserialize(deserializer)?;\n Self::from_base64(&s).map_err(serde::de::Error::custom)\n }\n}\n#[derive(Clone, Default)]\npub struct Fingerprinter {\n hasher: Blake2b,\n}\n\nimpl Fingerprinter {\n pub fn into_fingerprint(self) -> Fingerprint {\n Fingerprint(self.hasher.finalize().into())\n }\n\n pub fn with(self, value: &S) -> Result {\n let mut fingerprinter = self;\n value.serialize(&mut fingerprinter)?;\n Ok(fingerprinter)\n }\n\n pub fn write(&mut self, value: &S) -> Result<(), FingerprinterError> {\n value.serialize(self)\n }\n\n fn write_type_tag(&mut self, tag: &str) {\n self.hasher.update(tag.as_bytes());\n self.hasher.update(b\";\");\n }\n\n fn write_end_tag(&mut self) {\n self.hasher.update(b\".\");\n }\n\n fn write_varlen_bytes(&mut self, bytes: &[u8]) {\n self.write_usize(bytes.len());\n self.hasher.update(bytes);\n }\n\n fn write_usize(&mut self, value: usize) {\n self.hasher.update((value as u32).to_le_bytes());\n }\n}\n\nimpl Serializer 
for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n type SerializeSeq = Self;\n type SerializeTuple = Self;\n type SerializeTupleStruct = Self;\n type SerializeTupleVariant = Self;\n type SerializeMap = Self;\n type SerializeStruct = Self;\n type SerializeStructVariant = Self;\n\n fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {\n self.write_type_tag(if v { \"t\" } else { \"f\" });\n Ok(())\n }\n\n fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {\n self.write_type_tag(\"i1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {\n self.write_type_tag(\"i2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {\n self.write_type_tag(\"i4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {\n self.write_type_tag(\"i8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {\n self.write_type_tag(\"u1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {\n self.write_type_tag(\"u2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {\n self.write_type_tag(\"u4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {\n self.write_type_tag(\"u8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {\n self.write_type_tag(\"f4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {\n self.write_type_tag(\"f8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_char(self, v: char) -> Result<(), Self::Error> {\n 
self.write_type_tag(\"c\");\n self.write_usize(v as usize);\n Ok(())\n }\n\n fn serialize_str(self, v: &str) -> Result<(), Self::Error> {\n self.write_type_tag(\"s\");\n self.write_varlen_bytes(v.as_bytes());\n Ok(())\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result<(), Self::Error> {\n self.write_type_tag(\"b\");\n self.write_varlen_bytes(v);\n Ok(())\n }\n\n fn serialize_none(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"\");\n Ok(())\n }\n\n fn serialize_some(self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"()\");\n Ok(())\n }\n\n fn serialize_unit_struct(self, name: &'static str) -> Result<(), Self::Error> {\n self.write_type_tag(\"US\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(())\n }\n\n fn serialize_unit_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result<(), Self::Error> {\n self.write_type_tag(\"UV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(())\n }\n\n fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NS\");\n self.write_varlen_bytes(name.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_seq(self, _len: Option) -> Result {\n self.write_type_tag(\"L\");\n Ok(self)\n }\n\n fn serialize_tuple(self, _len: usize) -> Result {\n self.write_type_tag(\"T\");\n Ok(self)\n }\n\n fn serialize_tuple_struct(\n self,\n name: &'static str,\n _len: usize,\n ) 
-> Result {\n self.write_type_tag(\"TS\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_tuple_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n self.write_type_tag(\"M\");\n Ok(self)\n }\n\n fn serialize_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"S\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_struct_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"SV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n}\n\nimpl SerializeSeq for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTuple for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleVariant for &mut Fingerprinter {\n type Ok = 
();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeMap for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n key.serialize(&mut **self)\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStructVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/components.rs", "use super::{CombinedState, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::prelude::*;\nuse std::fmt::Debug;\n\npub trait State: Debug + Send + Sync {\n fn key(&self) -> Key;\n}\n\n#[async_trait]\npub trait SetupOperator: 'static + Send + Sync {\n type Key: Debug + Hash + Eq + Clone + Send + Sync;\n type State: State;\n type SetupState: Send + Sync + 
IntoIterator;\n type Context: Sync;\n\n fn describe_key(&self, key: &Self::Key) -> String;\n\n fn describe_state(&self, state: &Self::State) -> String;\n\n fn is_up_to_date(&self, current: &Self::State, desired: &Self::State) -> bool;\n\n async fn create(&self, state: &Self::State, context: &Self::Context) -> Result<()>;\n\n async fn delete(&self, key: &Self::Key, context: &Self::Context) -> Result<()>;\n\n async fn update(&self, state: &Self::State, context: &Self::Context) -> Result<()> {\n self.delete(&state.key(), context).await?;\n self.create(state, context).await\n }\n}\n\n#[derive(Debug)]\nstruct CompositeStateUpsert {\n state: S,\n already_exists: bool,\n}\n\n#[derive(Derivative)]\n#[derivative(Debug)]\npub struct SetupStatus {\n #[derivative(Debug = \"ignore\")]\n desc: D,\n keys_to_delete: IndexSet,\n states_to_upsert: Vec>,\n}\n\nimpl SetupStatus {\n pub fn create(\n desc: D,\n desired: Option,\n existing: CombinedState,\n ) -> Result {\n let existing_component_states = CombinedState {\n current: existing.current.map(|s| {\n s.into_iter()\n .map(|s| (s.key(), s))\n .collect::>()\n }),\n staging: existing\n .staging\n .into_iter()\n .map(|s| match s {\n StateChange::Delete => StateChange::Delete,\n StateChange::Upsert(s) => {\n StateChange::Upsert(s.into_iter().map(|s| (s.key(), s)).collect())\n }\n })\n .collect(),\n legacy_state_key: existing.legacy_state_key,\n };\n let mut keys_to_delete = IndexSet::new();\n let mut states_to_upsert = vec![];\n\n // Collect all existing component keys\n for c in existing_component_states.possible_versions() {\n keys_to_delete.extend(c.keys().cloned());\n }\n\n if let Some(desired_state) = desired {\n for desired_comp_state in desired_state {\n let key = desired_comp_state.key();\n\n // Remove keys that should be kept from deletion list\n keys_to_delete.shift_remove(&key);\n\n // Add components that need to be updated\n let is_up_to_date = existing_component_states.always_exists()\n && 
existing_component_states.possible_versions().all(|v| {\n v.get(&key)\n .is_some_and(|s| desc.is_up_to_date(s, &desired_comp_state))\n });\n if !is_up_to_date {\n let already_exists = existing_component_states\n .possible_versions()\n .any(|v| v.contains_key(&key));\n states_to_upsert.push(CompositeStateUpsert {\n state: desired_comp_state,\n already_exists,\n });\n }\n }\n }\n\n Ok(Self {\n desc,\n keys_to_delete,\n states_to_upsert,\n })\n }\n}\n\nimpl ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n\n for key in &self.keys_to_delete {\n result.push(setup::ChangeDescription::Action(format!(\n \"Delete {}\",\n self.desc.describe_key(key)\n )));\n }\n\n for state in &self.states_to_upsert {\n result.push(setup::ChangeDescription::Action(format!(\n \"{} {}\",\n if state.already_exists {\n \"Update\"\n } else {\n \"Create\"\n },\n self.desc.describe_state(&state.state)\n )));\n }\n\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.keys_to_delete.is_empty() && self.states_to_upsert.is_empty() {\n SetupChangeType::NoChange\n } else if self.keys_to_delete.is_empty() {\n SetupChangeType::Create\n } else if self.states_to_upsert.is_empty() {\n SetupChangeType::Delete\n } else {\n SetupChangeType::Update\n }\n }\n}\n\npub async fn apply_component_changes(\n changes: Vec<&SetupStatus>,\n context: &D::Context,\n) -> Result<()> {\n // First delete components that need to be removed\n for change in changes.iter() {\n for key in &change.keys_to_delete {\n change.desc.delete(key, context).await?;\n }\n }\n\n // Then upsert components that need to be updated\n for change in changes.iter() {\n for state in &change.states_to_upsert {\n if state.already_exists {\n change.desc.update(&state.state, context).await?;\n } else {\n change.desc.create(&state.state, context).await?;\n }\n }\n }\n\n Ok(())\n}\n\nimpl ResourceSetupStatus for (A, B) {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n 
result.extend(self.0.describe_changes());\n result.extend(self.1.describe_changes());\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.0.change_type(), self.1.change_type()) {\n (SetupChangeType::Invalid, _) | (_, SetupChangeType::Invalid) => {\n SetupChangeType::Invalid\n }\n (SetupChangeType::NoChange, b) => b,\n (a, _) => a,\n }\n }\n}\n"], ["/cocoindex/src/service/flows.rs", "use crate::prelude::*;\n\nuse crate::execution::{evaluator, indexing_status, memoization, row_indexer, stats};\nuse crate::lib_context::LibContext;\nuse crate::{base::schema::FlowSchema, ops::interface::SourceExecutorListOptions};\nuse axum::{\n Json,\n extract::{Path, State},\n http::StatusCode,\n};\nuse axum_extra::extract::Query;\n\npub async fn list_flows(\n State(lib_context): State>,\n) -> Result>, ApiError> {\n Ok(Json(\n lib_context.flows.lock().unwrap().keys().cloned().collect(),\n ))\n}\n\npub async fn get_flow_schema(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(flow_ctx.flow.data_schema.clone()))\n}\n\n#[derive(Serialize)]\npub struct GetFlowResponse {\n flow_spec: spec::FlowInstanceSpec,\n data_schema: FlowSchema,\n fingerprint: utils::fingerprint::Fingerprint,\n}\n\npub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}\n\n#[derive(Deserialize)]\npub struct GetKeysParam {\n field: 
String,\n}\n\n#[derive(Serialize)]\npub struct GetKeysResponse {\n key_type: schema::EnrichedValueType,\n keys: Vec,\n}\n\npub async fn get_keys(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let schema = &flow_ctx.flow.data_schema;\n\n let field_idx = schema\n .fields\n .iter()\n .position(|f| f.name == query.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field not found: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let key_type = schema.fields[field_idx]\n .value_type\n .typ\n .key_type()\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field has no key: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let execution_plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = execution_plan\n .import_ops\n .iter()\n .find(|op| op.output.field_idx == field_idx as u32)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field is not a source: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n let mut keys = Vec::new();\n while let Some(rows) = rows_stream.next().await {\n keys.extend(rows?.into_iter().map(|row| row.key));\n }\n Ok(Json(GetKeysResponse {\n key_type: key_type.clone(),\n keys,\n }))\n}\n\n#[derive(Deserialize)]\npub struct SourceRowKeyParams {\n field: String,\n key: Vec,\n}\n\n#[derive(Serialize)]\npub struct EvaluateDataResponse {\n schema: FlowSchema,\n data: value::ScopeValue,\n}\n\nstruct SourceRowKeyContextHolder<'a> {\n plan: Arc,\n import_op_idx: usize,\n schema: &'a FlowSchema,\n key: value::KeyValue,\n}\n\nimpl<'a> SourceRowKeyContextHolder<'a> {\n async fn create(flow_ctx: &'a FlowContext, source_row_key: SourceRowKeyParams) -> Result {\n let schema = &flow_ctx.flow.data_schema;\n let import_op_idx = flow_ctx\n .flow\n .flow_instance\n .import_ops\n .iter()\n 
.position(|op| op.name == source_row_key.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"source field not found: {}\", source_row_key.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[import_op_idx];\n let field_schema = &schema.fields[import_op.output.field_idx as usize];\n let table_schema = match &field_schema.value_type.typ {\n schema::ValueType::Table(table) => table,\n _ => api_bail!(\"field is not a table: {}\", source_row_key.field),\n };\n let key_field = table_schema\n .key_field()\n .ok_or_else(|| api_error!(\"field {} does not have a key\", source_row_key.field))?;\n let key = value::KeyValue::from_strs(source_row_key.key, &key_field.value_type.typ)?;\n Ok(Self {\n plan,\n import_op_idx,\n schema,\n key,\n })\n }\n\n fn as_context<'b>(&'b self) -> evaluator::SourceRowEvaluationContext<'b> {\n evaluator::SourceRowEvaluationContext {\n plan: &self.plan,\n import_op: &self.plan.import_ops[self.import_op_idx],\n schema: self.schema,\n key: &self.key,\n import_op_idx: self.import_op_idx,\n }\n }\n}\n\npub async fn evaluate_data(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let evaluate_output = row_indexer::evaluate_source_entry_with_memory(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n memoization::EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: true,\n },\n lib_context.require_builtin_db_pool()?,\n )\n .await?\n .ok_or_else(|| {\n api_error!(\n \"value not found for source at the specified key: {key:?}\",\n key = source_row_key_ctx.key\n )\n })?;\n\n Ok(Json(EvaluateDataResponse {\n schema: flow_ctx.flow.data_schema.clone(),\n data: 
evaluate_output.data_scope.into(),\n }))\n}\n\npub async fn update(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow_ctx.clone(),\n lib_context.require_builtin_db_pool()?,\n execution::FlowLiveUpdaterOptions {\n live_mode: false,\n ..Default::default()\n },\n )\n .await?;\n live_updater.wait().await?;\n Ok(Json(live_updater.index_update_info()))\n}\n\npub async fn get_row_indexing_status(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let indexing_status = indexing_status::get_source_row_indexing_status(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n lib_context.require_builtin_db_pool()?,\n )\n .await?;\n Ok(Json(indexing_status))\n}\n"], ["/cocoindex/src/execution/memoization.rs", "use anyhow::{Result, bail};\nuse serde::{Deserialize, Serialize};\nuse std::{\n borrow::Cow,\n collections::HashMap,\n future::Future,\n sync::{Arc, Mutex},\n};\n\nuse crate::{\n base::{schema, value},\n service::error::{SharedError, SharedResultExtRef},\n utils::fingerprint::{Fingerprint, Fingerprinter},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StoredCacheEntry {\n time_sec: i64,\n value: serde_json::Value,\n}\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct StoredMemoizationInfo {\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub cache: HashMap,\n\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub uuids: HashMap>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub content_hash: Option,\n}\n\npub type CacheEntryCell = Arc>>;\nenum 
CacheData {\n /// Existing entry in previous runs, but not in current run yet.\n Previous(serde_json::Value),\n /// Value appeared in current run.\n Current(CacheEntryCell),\n}\n\nstruct CacheEntry {\n time: chrono::DateTime,\n data: CacheData,\n}\n\n#[derive(Default)]\nstruct UuidEntry {\n uuids: Vec,\n num_current: usize,\n}\n\nimpl UuidEntry {\n fn new(uuids: Vec) -> Self {\n Self {\n uuids,\n num_current: 0,\n }\n }\n\n fn into_stored(self) -> Option> {\n if self.num_current == 0 {\n return None;\n }\n let mut uuids = self.uuids;\n if self.num_current < uuids.len() {\n uuids.truncate(self.num_current);\n }\n Some(uuids)\n }\n}\n\npub struct EvaluationMemoryOptions {\n pub enable_cache: bool,\n\n /// If true, it's for evaluation only.\n /// In this mode, we don't memoize anything.\n pub evaluation_only: bool,\n}\n\npub struct EvaluationMemory {\n current_time: chrono::DateTime,\n cache: Option>>,\n uuids: Mutex>,\n evaluation_only: bool,\n}\n\nimpl EvaluationMemory {\n pub fn new(\n current_time: chrono::DateTime,\n stored_info: Option,\n options: EvaluationMemoryOptions,\n ) -> Self {\n let (stored_cache, stored_uuids) = stored_info\n .map(|stored_info| (stored_info.cache, stored_info.uuids))\n .unzip();\n Self {\n current_time,\n cache: options.enable_cache.then(|| {\n Mutex::new(\n stored_cache\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, e)| {\n (\n k,\n CacheEntry {\n time: chrono::DateTime::from_timestamp(e.time_sec, 0)\n .unwrap_or(chrono::DateTime::::MIN_UTC),\n data: CacheData::Previous(e.value),\n },\n )\n })\n .collect(),\n )\n }),\n uuids: Mutex::new(\n (!options.evaluation_only)\n .then_some(stored_uuids)\n .flatten()\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, v)| (k, UuidEntry::new(v)))\n .collect(),\n ),\n evaluation_only: options.evaluation_only,\n }\n }\n\n pub fn into_stored(self) -> Result {\n if self.evaluation_only {\n bail!(\"For evaluation only, cannot convert to stored MemoizationInfo\");\n }\n let 
cache = if let Some(cache) = self.cache {\n cache\n .into_inner()?\n .into_iter()\n .filter_map(|(k, e)| match e.data {\n CacheData::Previous(_) => None,\n CacheData::Current(entry) => match entry.get() {\n Some(Ok(v)) => Some(serde_json::to_value(v).map(|value| {\n (\n k,\n StoredCacheEntry {\n time_sec: e.time.timestamp(),\n value,\n },\n )\n })),\n _ => None,\n },\n })\n .collect::>()?\n } else {\n bail!(\"Cache is disabled, cannot convert to stored MemoizationInfo\");\n };\n let uuids = self\n .uuids\n .into_inner()?\n .into_iter()\n .filter_map(|(k, v)| v.into_stored().map(|uuids| (k, uuids)))\n .collect();\n Ok(StoredMemoizationInfo {\n cache,\n uuids,\n content_hash: None,\n })\n }\n\n pub fn get_cache_entry(\n &self,\n key: impl FnOnce() -> Result,\n typ: &schema::ValueType,\n ttl: Option,\n ) -> Result> {\n let mut cache = if let Some(cache) = &self.cache {\n cache.lock().unwrap()\n } else {\n return Ok(None);\n };\n let result = match cache.entry(key()?) {\n std::collections::hash_map::Entry::Occupied(mut entry)\n if !ttl\n .map(|ttl| entry.get().time + ttl < self.current_time)\n .unwrap_or(false) =>\n {\n let entry_mut = &mut entry.get_mut();\n match &mut entry_mut.data {\n CacheData::Previous(value) => {\n let value = value::Value::from_json(std::mem::take(value), typ)?;\n let cell = Arc::new(tokio::sync::OnceCell::from(Ok(value)));\n let time = entry_mut.time;\n entry.insert(CacheEntry {\n time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n CacheData::Current(cell) => cell.clone(),\n }\n }\n entry => {\n let cell = Arc::new(tokio::sync::OnceCell::new());\n entry.insert_entry(CacheEntry {\n time: self.current_time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n };\n Ok(Some(result))\n }\n\n pub fn next_uuid(&self, key: Fingerprint) -> Result {\n let mut uuids = self.uuids.lock().unwrap();\n\n let entry = uuids.entry(key).or_default();\n let uuid = if self.evaluation_only {\n let fp = Fingerprinter::default()\n .with(&key)?\n 
.with(&entry.num_current)?\n .into_fingerprint();\n uuid::Uuid::new_v8(fp.0)\n } else if entry.num_current < entry.uuids.len() {\n entry.uuids[entry.num_current]\n } else {\n let uuid = uuid::Uuid::new_v4();\n entry.uuids.push(uuid);\n uuid\n };\n entry.num_current += 1;\n Ok(uuid)\n }\n}\n\npub async fn evaluate_with_cell(\n cell: Option<&CacheEntryCell>,\n compute: impl FnOnce() -> Fut,\n) -> Result>\nwhere\n Fut: Future>,\n{\n let result = match cell {\n Some(cell) => Cow::Borrowed(\n cell.get_or_init(|| {\n let fut = compute();\n async move { fut.await.map_err(SharedError::new) }\n })\n .await\n .std_result()?,\n ),\n None => Cow::Owned(compute().await?),\n };\n Ok(result)\n}\n"], ["/cocoindex/src/builder/plan.rs", "use crate::prelude::*;\n\nuse crate::ops::interface::*;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalFieldReference {\n /// Must be non-empty.\n pub fields_idx: Vec,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedFieldReference {\n pub local: AnalyzedLocalFieldReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalCollectorReference {\n pub collector_idx: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedCollectorReference {\n pub local: AnalyzedLocalCollectorReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct AnalyzedStructMapping {\n pub fields: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\")]\npub enum AnalyzedValueMapping {\n Constant { value: value::Value },\n Field(AnalyzedFieldReference),\n 
Struct(AnalyzedStructMapping),\n}\n\n#[derive(Debug, Clone)]\npub struct AnalyzedOpOutput {\n pub field_idx: u32,\n}\n\npub struct AnalyzedImportOp {\n pub name: String,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n pub primary_key_type: schema::ValueType,\n pub refresh_options: spec::SourceRefreshOptions,\n\n pub concurrency_controller: concur_control::CombinedConcurrencyController,\n}\n\npub struct AnalyzedFunctionExecInfo {\n pub enable_cache: bool,\n pub behavior_version: Option,\n\n /// Fingerprinter of the function's behavior.\n pub fingerprinter: Fingerprinter,\n /// To deserialize cached value.\n pub output_type: schema::ValueType,\n}\n\npub struct AnalyzedTransformOp {\n pub name: String,\n pub inputs: Vec,\n pub function_exec_info: AnalyzedFunctionExecInfo,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n}\n\npub struct AnalyzedForEachOp {\n pub name: String,\n pub local_field_ref: AnalyzedLocalFieldReference,\n pub op_scope: AnalyzedOpScope,\n pub concurrency_controller: concur_control::ConcurrencyController,\n}\n\npub struct AnalyzedCollectOp {\n pub name: String,\n pub has_auto_uuid_field: bool,\n pub input: AnalyzedStructMapping,\n pub collector_ref: AnalyzedCollectorReference,\n /// Fingerprinter of the collector's schema. 
Used to decide when to reuse auto-generated UUIDs.\n pub fingerprinter: Fingerprinter,\n}\n\npub enum AnalyzedPrimaryKeyDef {\n Fields(Vec),\n}\n\npub struct AnalyzedExportOp {\n pub name: String,\n pub input: AnalyzedLocalCollectorReference,\n pub export_target_factory: Arc,\n pub export_context: Arc,\n pub primary_key_def: AnalyzedPrimaryKeyDef,\n pub primary_key_type: schema::ValueType,\n /// idx for value fields - excluding the primary key field.\n pub value_fields: Vec,\n /// If true, value is never changed on the same primary key.\n /// This is guaranteed if the primary key contains auto-generated UUIDs.\n pub value_stable: bool,\n}\n\npub struct AnalyzedExportTargetOpGroup {\n pub target_factory: Arc,\n pub op_idx: Vec,\n}\n\npub enum AnalyzedReactiveOp {\n Transform(AnalyzedTransformOp),\n ForEach(AnalyzedForEachOp),\n Collect(AnalyzedCollectOp),\n}\n\npub struct AnalyzedOpScope {\n pub reactive_ops: Vec,\n pub collector_len: usize,\n}\n\npub struct ExecutionPlan {\n pub logic_fingerprint: Fingerprint,\n\n pub import_ops: Vec,\n pub op_scope: AnalyzedOpScope,\n pub export_ops: Vec,\n pub export_op_groups: Vec,\n}\n\npub struct TransientExecutionPlan {\n pub input_fields: Vec,\n pub op_scope: AnalyzedOpScope,\n pub output_value: AnalyzedValueMapping,\n}\n\nfn u32_is_zero(v: &u32) -> bool {\n *v == 0\n}\n"], ["/cocoindex/src/execution/db_tracking.rs", "use crate::prelude::*;\n\nuse super::{db_tracking_setup::TrackingTableSetupState, memoization::StoredMemoizationInfo};\nuse crate::utils::{db::WriteAction, fingerprint::Fingerprint};\nuse futures::Stream;\nuse serde::de::{self, Deserializer, SeqAccess, Visitor};\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::fmt;\n\n#[derive(Debug, Clone)]\npub struct TrackedTargetKeyInfo {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n pub process_ordinal: i64,\n pub fingerprint: Option,\n}\n\nimpl Serialize for TrackedTargetKeyInfo {\n fn serialize(&self, serializer: S) -> Result\n 
where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(None)?;\n seq.serialize_element(&self.key)?;\n seq.serialize_element(&self.process_ordinal)?;\n seq.serialize_element(&self.fingerprint)?;\n if !self.additional_key.is_null() {\n seq.serialize_element(&self.additional_key)?;\n }\n seq.end()\n }\n}\n\nimpl<'de> serde::Deserialize<'de> for TrackedTargetKeyInfo {\n fn deserialize(deserializer: D) -> Result\n where\n D: Deserializer<'de>,\n {\n struct TrackedTargetKeyVisitor;\n\n impl<'de> Visitor<'de> for TrackedTargetKeyVisitor {\n type Value = TrackedTargetKeyInfo;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"a sequence of 3 or 4 elements for TrackedTargetKey\")\n }\n\n fn visit_seq(self, mut seq: A) -> Result\n where\n A: SeqAccess<'de>,\n {\n let target_key: serde_json::Value = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n let process_ordinal: i64 = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(1, &self))?;\n let fingerprint: Option = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(2, &self))?;\n let additional_key: Option = seq.next_element()?;\n\n Ok(TrackedTargetKeyInfo {\n key: target_key,\n process_ordinal,\n fingerprint,\n additional_key: additional_key.unwrap_or(serde_json::Value::Null),\n })\n }\n }\n\n deserializer.deserialize_seq(TrackedTargetKeyVisitor)\n }\n}\n\n/// (source_id, target_key)\npub type TrackedTargetKeyForSource = Vec<(i32, Vec)>;\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForProcessing {\n pub memoization_info: Option>>,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub max_process_ordinal: Option,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_processing(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = 
format!(\n \"SELECT memoization_info, processed_source_ordinal, process_logic_fingerprint, max_process_ordinal, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n\n Ok(tracking_info)\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForPrecommit {\n pub max_process_ordinal: i64,\n pub staging_target_keys: sqlx::types::Json,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_ordinal: Option,\n pub target_keys: Option>,\n}\n\npub async fn read_source_tracking_info_for_precommit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT max_process_ordinal, staging_target_keys, processed_source_ordinal, process_logic_fingerprint, process_ordinal, target_keys FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let precommit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n\n Ok(precommit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n max_process_ordinal: i64,\n staging_target_keys: TrackedTargetKeyForSource,\n memoization_info: Option<&StoredMemoizationInfo>,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} (source_id, source_key, max_process_ordinal, staging_target_keys, memoization_info) VALUES ($1, $2, $3, $4, $5)\",\n db_setup.table_name\n ),\n WriteAction::Update => 
format!(\n \"UPDATE {} SET max_process_ordinal = $3, staging_target_keys = $4, memoization_info = $5 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(max_process_ordinal) // $3\n .bind(sqlx::types::Json(staging_target_keys)) // $4\n .bind(memoization_info.map(sqlx::types::Json)) // $5\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForCommit {\n pub staging_target_keys: sqlx::types::Json,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_commit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT staging_target_keys, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let commit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n Ok(commit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n staging_target_keys: TrackedTargetKeyForSource,\n processed_source_ordinal: Option,\n logic_fingerprint: &[u8],\n process_ordinal: i64,\n process_time_micros: i64,\n target_keys: TrackedTargetKeyForSource,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} ( \\\n source_id, source_key, \\\n max_process_ordinal, staging_target_keys, \\\n processed_source_ordinal, process_logic_fingerprint, process_ordinal, process_time_micros, target_keys) \\\n VALUES ($1, $2, $6 + 1, $3, $4, $5, $6, $7, $8)\",\n 
db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET staging_target_keys = $3, processed_source_ordinal = $4, process_logic_fingerprint = $5, process_ordinal = $6, process_time_micros = $7, target_keys = $8 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(sqlx::types::Json(staging_target_keys)) // $3\n .bind(processed_source_ordinal) // $4\n .bind(logic_fingerprint) // $5\n .bind(process_ordinal) // $6\n .bind(process_time_micros) // $7\n .bind(sqlx::types::Json(target_keys)) // $8\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub async fn delete_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct TrackedSourceKeyMetadata {\n pub source_key: serde_json::Value,\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n}\n\npub struct ListTrackedSourceKeyMetadataState {\n query_str: String,\n}\n\nimpl ListTrackedSourceKeyMetadataState {\n pub fn new() -> Self {\n Self {\n query_str: String::new(),\n }\n }\n\n pub fn list<'a>(\n &'a mut self,\n source_id: i32,\n db_setup: &'a TrackingTableSetupState,\n pool: &'a PgPool,\n ) -> impl Stream> + 'a {\n self.query_str = format!(\n \"SELECT source_key, processed_source_ordinal, process_logic_fingerprint FROM {} WHERE source_id = $1\",\n db_setup.table_name\n );\n sqlx::query_as(&self.query_str).bind(source_id).fetch(pool)\n }\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceLastProcessedInfo {\n pub processed_source_ordinal: 
Option,\n pub process_logic_fingerprint: Option>,\n pub process_time_micros: Option,\n}\n\npub async fn read_source_last_processed_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT processed_source_ordinal, process_logic_fingerprint, process_time_micros FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let last_processed_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n Ok(last_processed_info)\n}\n\npub async fn update_source_tracking_ordinal(\n source_id: i32,\n source_key_json: &serde_json::Value,\n processed_source_ordinal: Option,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"UPDATE {} SET processed_source_ordinal = $3 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(processed_source_ordinal) // $3\n .execute(db_executor)\n .await?;\n Ok(())\n}\n"], ["/cocoindex/src/execution/row_indexer.rs", "use crate::prelude::*;\n\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, HashSet};\n\nuse super::db_tracking::{self, TrackedTargetKeyInfo, read_source_tracking_info_for_processing};\nuse super::db_tracking_setup;\nuse super::evaluator::{\n EvaluateSourceEntryOutput, SourceRowEvaluationContext, evaluate_source_entry,\n};\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, StoredMemoizationInfo};\nuse super::stats;\n\nuse crate::base::value::{self, FieldValues, KeyValue};\nuse crate::builder::plan::*;\nuse crate::ops::interface::{\n ExportTargetMutation, ExportTargetUpsertEntry, Ordinal, SourceExecutorGetOptions,\n};\nuse crate::utils::db::WriteAction;\nuse 
crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\npub fn extract_primary_key(\n primary_key_def: &AnalyzedPrimaryKeyDef,\n record: &FieldValues,\n) -> Result {\n match primary_key_def {\n AnalyzedPrimaryKeyDef::Fields(fields) => {\n KeyValue::from_values(fields.iter().map(|field| &record.fields[*field]))\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]\npub enum SourceVersionKind {\n #[default]\n UnknownLogic,\n DifferentLogic,\n CurrentLogic,\n NonExistence,\n}\n\n#[derive(Debug, Clone, Default)]\npub struct SourceVersion {\n pub ordinal: Ordinal,\n pub kind: SourceVersionKind,\n}\n\nimpl SourceVersion {\n pub fn from_stored(\n stored_ordinal: Option,\n stored_fp: &Option>,\n curr_fp: Fingerprint,\n ) -> Self {\n Self {\n ordinal: Ordinal(stored_ordinal),\n kind: match &stored_fp {\n Some(stored_fp) => {\n if stored_fp.as_slice() == curr_fp.0.as_slice() {\n SourceVersionKind::CurrentLogic\n } else {\n SourceVersionKind::DifferentLogic\n }\n }\n None => SourceVersionKind::UnknownLogic,\n },\n }\n }\n\n pub fn from_stored_processing_info(\n info: &db_tracking::SourceTrackingInfoForProcessing,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_stored_precommit_info(\n info: &db_tracking::SourceTrackingInfoForPrecommit,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_current_with_ordinal(ordinal: Ordinal) -> Self {\n Self {\n ordinal,\n kind: SourceVersionKind::CurrentLogic,\n }\n }\n\n pub fn from_current_data(data: &interface::SourceData) -> Self {\n let kind = match &data.value {\n interface::SourceValue::Existence(_) => SourceVersionKind::CurrentLogic,\n interface::SourceValue::NonExistence => SourceVersionKind::NonExistence,\n };\n Self {\n ordinal: data.ordinal,\n kind,\n }\n }\n\n pub fn 
should_skip(\n &self,\n target: &SourceVersion,\n update_stats: Option<&stats::UpdateStats>,\n ) -> bool {\n // Ordinal indicates monotonic invariance - always respect ordinal order\n // Never process older ordinals to maintain consistency\n let should_skip = match (self.ordinal.0, target.ordinal.0) {\n (Some(existing_ordinal), Some(target_ordinal)) => {\n // Skip if target ordinal is older, or same ordinal with same/older logic version\n existing_ordinal > target_ordinal\n || (existing_ordinal == target_ordinal && self.kind >= target.kind)\n }\n _ => false,\n };\n if should_skip {\n if let Some(update_stats) = update_stats {\n update_stats.num_no_change.inc(1);\n }\n }\n should_skip\n }\n}\n\npub enum SkippedOr {\n Normal(T),\n Skipped(SourceVersion),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\nstruct TargetKeyPair {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Default)]\nstruct TrackingInfoForTarget<'a> {\n export_op: Option<&'a AnalyzedExportOp>,\n\n // Existing keys info. 
Keyed by target key.\n // Will be removed after new rows for the same key are added into `new_staging_keys_info` and `mutation.upserts`,\n // hence all remaining ones are to be deleted.\n existing_staging_keys_info: HashMap)>>,\n existing_keys_info: HashMap)>>,\n\n // New keys info for staging.\n new_staging_keys_info: Vec,\n\n // Mutation to apply to the target storage.\n mutation: ExportTargetMutation,\n}\n\n#[derive(Debug)]\nstruct PrecommitData<'a> {\n evaluate_output: &'a EvaluateSourceEntryOutput,\n memoization_info: &'a StoredMemoizationInfo,\n}\nstruct PrecommitMetadata {\n source_entry_exists: bool,\n process_ordinal: i64,\n existing_process_ordinal: Option,\n new_target_keys: db_tracking::TrackedTargetKeyForSource,\n}\nstruct PrecommitOutput {\n metadata: PrecommitMetadata,\n target_mutations: HashMap,\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fp: Fingerprint,\n data: Option>,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n export_ops: &[AnalyzedExportOp],\n export_ops_exec_ctx: &[exec_ctx::ExportOpExecutionContext],\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n if let Some(tracking_info) = &tracking_info {\n let existing_source_version =\n SourceVersion::from_stored_precommit_info(tracking_info, logic_fp);\n if existing_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_source_version));\n }\n }\n let tracking_info_exists = tracking_info.is_some();\n let process_ordinal = (tracking_info\n .as_ref()\n .map(|info| info.max_process_ordinal)\n .unwrap_or(0)\n + 1)\n 
.max(process_timestamp.timestamp_millis());\n let existing_process_ordinal = tracking_info.as_ref().and_then(|info| info.process_ordinal);\n\n let mut tracking_info_for_targets = HashMap::::new();\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default()\n .export_op = Some(export_op);\n }\n\n // Collect `tracking_info_for_targets` from existing tracking info.\n if let Some(info) = tracking_info {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n for (target_id, keys_info) in staging_target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_staging_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n\n if let Some(sqlx::types::Json(target_keys)) = info.target_keys {\n for (target_id, keys_info) in target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n }\n }\n\n let mut new_target_keys_info = db_tracking::TrackedTargetKeyForSource::default();\n if let Some(data) = &data {\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n let target_info = tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default();\n let mut keys_info = Vec::new();\n let collected_values =\n &data.evaluate_output.collected_values[export_op.input.collector_idx as usize];\n for value in collected_values.iter() {\n let 
primary_key = extract_primary_key(&export_op.primary_key_def, value)?;\n let primary_key_json = serde_json::to_value(&primary_key)?;\n\n let mut field_values = FieldValues {\n fields: Vec::with_capacity(export_op.value_fields.len()),\n };\n for field in export_op.value_fields.iter() {\n field_values\n .fields\n .push(value.fields[*field as usize].clone());\n }\n let additional_key = export_op.export_target_factory.extract_additional_key(\n &primary_key,\n &field_values,\n export_op.export_context.as_ref(),\n )?;\n let target_key_pair = TargetKeyPair {\n key: primary_key_json,\n additional_key,\n };\n let existing_target_keys = target_info.existing_keys_info.remove(&target_key_pair);\n let existing_staging_target_keys = target_info\n .existing_staging_keys_info\n .remove(&target_key_pair);\n\n let curr_fp = if !export_op.value_stable {\n Some(\n Fingerprinter::default()\n .with(&field_values)?\n .into_fingerprint(),\n )\n } else {\n None\n };\n if existing_target_keys\n .as_ref()\n .map(|keys| !keys.is_empty() && keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(false)\n && existing_staging_target_keys\n .map(|keys| keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(true)\n {\n // Already exists, with exactly the same value fingerprint.\n // Nothing need to be changed, except carrying over the existing target keys info.\n let (existing_ordinal, existing_fp) = existing_target_keys\n .ok_or_else(invariance_violation)?\n .into_iter()\n .next()\n .ok_or_else(invariance_violation)?;\n keys_info.push(TrackedTargetKeyInfo {\n key: target_key_pair.key,\n additional_key: target_key_pair.additional_key,\n process_ordinal: existing_ordinal,\n fingerprint: existing_fp,\n });\n } else {\n // Entry with new value. 
Needs to be upserted.\n let tracked_target_key = TrackedTargetKeyInfo {\n key: target_key_pair.key.clone(),\n additional_key: target_key_pair.additional_key.clone(),\n process_ordinal,\n fingerprint: curr_fp,\n };\n target_info.mutation.upserts.push(ExportTargetUpsertEntry {\n key: primary_key,\n additional_key: target_key_pair.additional_key,\n value: field_values,\n });\n target_info\n .new_staging_keys_info\n .push(tracked_target_key.clone());\n keys_info.push(tracked_target_key);\n }\n }\n new_target_keys_info.push((export_op_exec_ctx.target_id, keys_info));\n }\n }\n\n let mut new_staging_target_keys = db_tracking::TrackedTargetKeyForSource::default();\n let mut target_mutations = HashMap::with_capacity(export_ops.len());\n for (target_id, target_tracking_info) in tracking_info_for_targets.into_iter() {\n let legacy_keys: HashSet = target_tracking_info\n .existing_keys_info\n .into_keys()\n .chain(target_tracking_info.existing_staging_keys_info.into_keys())\n .collect();\n\n let mut new_staging_keys_info = target_tracking_info.new_staging_keys_info;\n // Add tracking info for deletions.\n new_staging_keys_info.extend(legacy_keys.iter().map(|key| TrackedTargetKeyInfo {\n key: key.key.clone(),\n additional_key: key.additional_key.clone(),\n process_ordinal,\n fingerprint: None,\n }));\n new_staging_target_keys.push((target_id, new_staging_keys_info));\n\n if let Some(export_op) = target_tracking_info.export_op {\n let mut mutation = target_tracking_info.mutation;\n mutation.deletes.reserve(legacy_keys.len());\n for legacy_key in legacy_keys.into_iter() {\n let key = value::Value::::from_json(\n legacy_key.key,\n &export_op.primary_key_type,\n )?\n .as_key()?;\n mutation.deletes.push(interface::ExportTargetDeleteEntry {\n key,\n additional_key: legacy_key.additional_key,\n });\n }\n target_mutations.insert(target_id, mutation);\n }\n }\n\n db_tracking::precommit_source_tracking_info(\n source_id,\n source_key_json,\n process_ordinal,\n new_staging_target_keys,\n 
data.as_ref().map(|data| data.memoization_info),\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n\n txn.commit().await?;\n\n Ok(SkippedOr::Normal(PrecommitOutput {\n metadata: PrecommitMetadata {\n source_entry_exists: data.is_some(),\n process_ordinal,\n existing_process_ordinal,\n new_target_keys: new_target_keys_info,\n },\n target_mutations,\n }))\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fingerprint: &[u8],\n precommit_metadata: PrecommitMetadata,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_commit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n let tracking_info_exists = tracking_info.is_some();\n if tracking_info.as_ref().and_then(|info| info.process_ordinal)\n >= Some(precommit_metadata.process_ordinal)\n {\n return Ok(());\n }\n\n let cleaned_staging_target_keys = tracking_info\n .map(|info| {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n staging_target_keys\n .into_iter()\n .filter_map(|(target_id, target_keys)| {\n let cleaned_target_keys: Vec<_> = target_keys\n .into_iter()\n .filter(|key_info| {\n Some(key_info.process_ordinal)\n > precommit_metadata.existing_process_ordinal\n && key_info.process_ordinal != precommit_metadata.process_ordinal\n })\n .collect();\n if !cleaned_target_keys.is_empty() {\n Some((target_id, cleaned_target_keys))\n } else {\n None\n }\n })\n .collect::>()\n })\n .unwrap_or_default();\n if !precommit_metadata.source_entry_exists && cleaned_staging_target_keys.is_empty() {\n // TODO: When we support distributed execution in the future, we'll need to leave a tombstone 
for a while\n // to prevent an earlier update causing the record reappear because of out-of-order processing.\n if tracking_info_exists {\n db_tracking::delete_source_tracking_info(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n }\n } else {\n db_tracking::commit_source_tracking_info(\n source_id,\n source_key_json,\n cleaned_staging_target_keys,\n source_version.ordinal.into(),\n logic_fingerprint,\n precommit_metadata.process_ordinal,\n process_timestamp.timestamp_micros(),\n precommit_metadata.new_target_keys,\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n }\n\n txn.commit().await?;\n\n Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn try_content_hash_optimization(\n source_id: i32,\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n current_hash: &crate::utils::fingerprint::Fingerprint,\n tracking_info: &db_tracking::SourceTrackingInfoForProcessing,\n existing_version: &Option,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result>> {\n // Check if we can use content hash optimization\n if existing_version\n .as_ref()\n .is_none_or(|v| v.kind != SourceVersionKind::CurrentLogic)\n {\n return Ok(None);\n }\n\n if tracking_info\n .max_process_ordinal\n .zip(tracking_info.process_ordinal)\n .is_none_or(|(max_ord, proc_ord)| max_ord != proc_ord)\n {\n return Ok(None);\n }\n\n let existing_hash = tracking_info\n .memoization_info\n .as_ref()\n .and_then(|info| info.0.as_ref())\n .and_then(|stored_info| stored_info.content_hash.as_ref());\n\n if existing_hash != Some(current_hash) {\n return Ok(None);\n }\n\n // Content hash matches - try optimization\n let mut txn = pool.begin().await?;\n\n let current_tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n 
db_setup,\n &mut *txn,\n )\n .await?;\n\n let Some(current_tracking_info) = current_tracking_info else {\n return Ok(None);\n };\n\n // Check 1: Same check as precommit - verify no newer version exists\n let current_source_version = SourceVersion::from_stored_precommit_info(\n ¤t_tracking_info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n if current_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(Some(SkippedOr::Skipped(current_source_version)));\n }\n\n // Check 2: Verify process_ordinal hasn't changed (no concurrent processing)\n let original_process_ordinal = tracking_info.process_ordinal;\n if current_tracking_info.process_ordinal != original_process_ordinal {\n return Ok(None);\n }\n\n // Safe to apply optimization - just update tracking table\n db_tracking::update_source_tracking_ordinal(\n source_id,\n source_key_json,\n source_version.ordinal.0,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n txn.commit().await?;\n update_stats.num_no_change.inc(1);\n Ok(Some(SkippedOr::Normal(())))\n}\n\npub async fn evaluate_source_entry_with_memory(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n options: EvaluationMemoryOptions,\n pool: &PgPool,\n) -> Result> {\n let stored_info = if options.enable_cache || !options.evaluation_only {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n existing_tracking_info\n .and_then(|info| info.memoization_info.map(|info| info.0))\n .flatten()\n } else {\n None\n };\n let memory = EvaluationMemory::new(chrono::Utc::now(), stored_info, options);\n let source_value = src_eval_ctx\n .import_op\n .executor\n .get_value(\n src_eval_ctx.key,\n &SourceExecutorGetOptions {\n 
include_value: true,\n include_ordinal: false,\n },\n )\n .await?\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value not returned\"))?;\n let output = match source_value {\n interface::SourceValue::Existence(source_value) => {\n Some(evaluate_source_entry(src_eval_ctx, source_value, &memory).await?)\n }\n interface::SourceValue::NonExistence => None,\n };\n Ok(output)\n}\n\npub async fn update_source_row(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n source_value: interface::SourceValue,\n source_version: &SourceVersion,\n pool: &PgPool,\n update_stats: &stats::UpdateStats,\n) -> Result> {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let process_time = chrono::Utc::now();\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n\n // Phase 1: Check existing tracking info and apply optimizations\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n let existing_version = match &existing_tracking_info {\n Some(info) => {\n let existing_version = SourceVersion::from_stored_processing_info(\n info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n\n // First check ordinal-based skipping\n if existing_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_version));\n }\n\n Some(existing_version)\n }\n None => None,\n };\n\n // Compute content hash once if needed for both optimization and evaluation\n let current_content_hash = match &source_value {\n interface::SourceValue::Existence(source_value) => Some(\n Fingerprinter::default()\n .with(source_value)?\n .into_fingerprint(),\n ),\n interface::SourceValue::NonExistence => None,\n };\n\n if let (Some(current_hash), Some(existing_tracking_info)) =\n (¤t_content_hash, &existing_tracking_info)\n {\n if let Some(optimization_result) = 
try_content_hash_optimization(\n source_id,\n src_eval_ctx,\n &source_key_json,\n source_version,\n current_hash,\n existing_tracking_info,\n &existing_version,\n &setup_execution_ctx.setup_state.tracking_table,\n update_stats,\n pool,\n )\n .await?\n {\n return Ok(optimization_result);\n }\n }\n\n let (output, stored_mem_info) = {\n let extracted_memoization_info = existing_tracking_info\n .and_then(|info| info.memoization_info)\n .and_then(|info| info.0);\n\n match source_value {\n interface::SourceValue::Existence(source_value) => {\n let evaluation_memory = EvaluationMemory::new(\n process_time,\n extracted_memoization_info,\n EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: false,\n },\n );\n\n let output =\n evaluate_source_entry(src_eval_ctx, source_value, &evaluation_memory).await?;\n let mut stored_info = evaluation_memory.into_stored()?;\n stored_info.content_hash = current_content_hash;\n\n (Some(output), stored_info)\n }\n interface::SourceValue::NonExistence => (None, Default::default()),\n }\n };\n\n // Phase 2 (precommit): Update with the memoization info and stage target keys.\n let precommit_output = precommit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n src_eval_ctx.plan.logic_fingerprint,\n output.as_ref().map(|scope_value| PrecommitData {\n evaluate_output: scope_value,\n memoization_info: &stored_mem_info,\n }),\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n &src_eval_ctx.plan.export_ops,\n &setup_execution_ctx.export_ops,\n update_stats,\n pool,\n )\n .await?;\n let precommit_output = match precommit_output {\n SkippedOr::Normal(output) => output,\n SkippedOr::Skipped(source_version) => return Ok(SkippedOr::Skipped(source_version)),\n };\n\n // Phase 3: Apply changes to the target storage, including upserting new target records and removing existing ones.\n let mut target_mutations = precommit_output.target_mutations;\n let apply_futs = src_eval_ctx\n .plan\n 
.export_op_groups\n .iter()\n .filter_map(|export_op_group| {\n let mutations_w_ctx: Vec<_> = export_op_group\n .op_idx\n .iter()\n .filter_map(|export_op_idx| {\n let export_op = &src_eval_ctx.plan.export_ops[*export_op_idx];\n target_mutations\n .remove(&setup_execution_ctx.export_ops[*export_op_idx].target_id)\n .filter(|m| !m.is_empty())\n .map(|mutation| interface::ExportTargetMutationWithContext {\n mutation,\n export_context: export_op.export_context.as_ref(),\n })\n })\n .collect();\n (!mutations_w_ctx.is_empty()).then(|| {\n export_op_group\n .target_factory\n .apply_mutation(mutations_w_ctx)\n })\n });\n\n // TODO: Handle errors.\n try_join_all(apply_futs).await?;\n\n // Phase 4: Update the tracking record.\n commit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n &src_eval_ctx.plan.logic_fingerprint.0,\n precommit_output.metadata,\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n if let Some(existing_version) = existing_version {\n if output.is_some() {\n if !source_version.ordinal.is_available()\n || source_version.ordinal != existing_version.ordinal\n {\n update_stats.num_updates.inc(1);\n } else {\n update_stats.num_reprocesses.inc(1);\n }\n } else {\n update_stats.num_deletions.inc(1);\n }\n } else if output.is_some() {\n update_stats.num_insertions.inc(1);\n }\n\n Ok(SkippedOr::Normal(()))\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_github_actions_scenario_ordinal_behavior() {\n // Test ordinal-based behavior - should_skip only cares about ordinal monotonic invariance\n // Content hash optimization is handled at update_source_row level\n\n let processed_version = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // GitHub Actions checkout: timestamp changes but content same\n let after_checkout_version = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n 
kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip at should_skip level (ordinal is newer - monotonic invariance)\n // Content hash optimization happens at update_source_row level to update only tracking\n assert!(!processed_version.should_skip(&after_checkout_version, None));\n\n // Reverse case: if we somehow get an older ordinal, always skip\n assert!(after_checkout_version.should_skip(&processed_version, None));\n\n // Now simulate actual content change\n let content_changed_version = SourceVersion {\n ordinal: Ordinal(Some(3000)), // Even newer timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip processing (ordinal is newer)\n assert!(!processed_version.should_skip(&content_changed_version, None));\n }\n\n #[test]\n fn test_content_hash_computation() {\n use crate::base::value::{BasicValue, FieldValues, Value};\n use crate::utils::fingerprint::Fingerprinter;\n\n // Test that content hash is computed correctly from source data\n let source_data1 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data2 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data3 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"World\".into())), // Different content\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let hash1 = Fingerprinter::default()\n .with(&source_data1)\n .unwrap()\n .into_fingerprint();\n\n let hash2 = Fingerprinter::default()\n .with(&source_data2)\n .unwrap()\n .into_fingerprint();\n\n let hash3 = Fingerprinter::default()\n .with(&source_data3)\n .unwrap()\n .into_fingerprint();\n\n // Same content should produce same hash\n assert_eq!(hash1, hash2);\n\n // Different content should produce different hash\n assert_ne!(hash1, hash3);\n assert_ne!(hash2, hash3);\n }\n\n #[test]\n fn 
test_github_actions_content_hash_optimization_requirements() {\n // This test documents the exact requirements for GitHub Actions scenario\n // where file modification times change but content remains the same\n\n use crate::utils::fingerprint::Fingerprinter;\n\n // Simulate file content that remains the same across GitHub Actions checkout\n let file_content = \"const hello = 'world';\\nexport default hello;\";\n\n // Hash before checkout (original file)\n let hash_before_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Hash after checkout (same content, different timestamp)\n let hash_after_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Content hashes must be identical for optimization to work\n assert_eq!(\n hash_before_checkout, hash_after_checkout,\n \"Content hash optimization requires identical hashes for same content\"\n );\n\n // Test with slightly different content (should produce different hashes)\n let modified_content = \"const hello = 'world!';\\nexport default hello;\"; // Added !\n let hash_modified = Fingerprinter::default()\n .with(&modified_content)\n .unwrap()\n .into_fingerprint();\n\n assert_ne!(\n hash_before_checkout, hash_modified,\n \"Different content should produce different hashes\"\n );\n }\n\n #[test]\n fn test_github_actions_ordinal_behavior_with_content_optimization() {\n // Test the complete GitHub Actions scenario:\n // 1. File processed with ordinal=1000, content_hash=ABC\n // 2. GitHub Actions checkout: ordinal=2000, content_hash=ABC (same content)\n // 3. 
Should use content hash optimization (update only tracking, skip evaluation)\n\n let original_processing = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original file timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n let after_github_checkout = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Step 1: Ordinal check should NOT skip (newer ordinal means potential processing needed)\n assert!(\n !original_processing.should_skip(&after_github_checkout, None),\n \"GitHub Actions: newer ordinal should not be skipped at ordinal level\"\n );\n\n // Step 2: Content hash optimization should trigger when content is same\n // This is tested in the integration level - the optimization path should:\n // - Compare content hashes\n // - If same: update only tracking info (process_ordinal, process_time)\n // - Skip expensive evaluation and target storage updates\n\n // Step 3: After optimization, tracking shows the new ordinal\n let after_optimization = SourceVersion {\n ordinal: Ordinal(Some(2000)), // Updated to new ordinal\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Future requests with same ordinal should be skipped\n assert!(\n after_optimization.should_skip(&after_github_checkout, None),\n \"After optimization, same ordinal should be skipped\"\n );\n }\n}\n"], ["/cocoindex/src/ops/functions/test_utils.rs", "use crate::builder::plan::{\n AnalyzedFieldReference, AnalyzedLocalFieldReference, AnalyzedValueMapping,\n};\nuse crate::ops::sdk::{\n AuthRegistry, BasicValueType, EnrichedValueType, FlowInstanceContext, OpArgSchema,\n OpArgsResolver, SimpleFunctionExecutor, SimpleFunctionFactoryBase, Value, make_output_type,\n};\nuse anyhow::Result;\nuse serde::de::DeserializeOwned;\nuse std::sync::Arc;\n\n// This function builds an argument schema for a flow function.\npub fn build_arg_schema(\n name: &str,\n value_type: BasicValueType,\n) -> (Option<&str>, EnrichedValueType) {\n 
(Some(name), make_output_type(value_type))\n}\n\n// This function tests a flow function by providing a spec, input argument schemas, and values.\npub async fn test_flow_function(\n factory: Arc,\n spec: S,\n input_arg_schemas: Vec<(Option<&str>, EnrichedValueType)>,\n input_arg_values: Vec,\n) -> Result\nwhere\n S: DeserializeOwned + Send + Sync + 'static,\n R: Send + Sync + 'static,\n F: SimpleFunctionFactoryBase + ?Sized,\n{\n // 1. Construct OpArgSchema\n let op_arg_schemas: Vec = input_arg_schemas\n .into_iter()\n .enumerate()\n .map(|(idx, (name, value_type))| OpArgSchema {\n name: name.map_or(crate::base::spec::OpArgName(None), |n| {\n crate::base::spec::OpArgName(Some(n.to_string()))\n }),\n value_type,\n analyzed_value: AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: AnalyzedLocalFieldReference {\n fields_idx: vec![idx as u32],\n },\n scope_up_level: 0,\n }),\n })\n .collect();\n\n // 2. Resolve Schema & Args\n let mut args_resolver = OpArgsResolver::new(&op_arg_schemas)?;\n let context = Arc::new(FlowInstanceContext {\n flow_instance_name: \"test_flow_function\".to_string(),\n auth_registry: Arc::new(AuthRegistry::default()),\n py_exec_ctx: None,\n });\n\n let (resolved_args_from_schema, _output_schema): (R, EnrichedValueType) = factory\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n\n args_resolver.done()?;\n\n // 3. Build Executor\n let executor: Box = factory\n .build_executor(spec, resolved_args_from_schema, Arc::clone(&context))\n .await?;\n\n // 4. 
Evaluate\n let result = executor.evaluate(input_arg_values).await?;\n\n Ok(result)\n}\n"], ["/cocoindex/src/ops/sdk.rs", "pub(crate) use crate::prelude::*;\n\nuse crate::builder::plan::AnalyzedFieldReference;\nuse crate::builder::plan::AnalyzedLocalFieldReference;\nuse std::collections::BTreeMap;\n\npub use super::factory_bases::*;\npub use super::interface::*;\npub use crate::base::schema::*;\npub use crate::base::spec::*;\npub use crate::base::value::*;\n\n// Disambiguate the ExportTargetBuildOutput type.\npub use super::factory_bases::TypedExportDataCollectionBuildOutput;\npub use super::registry::ExecutorFactoryRegistry;\n/// Defined for all types convertible to ValueType, to ease creation for ValueType in various operation factories.\npub trait TypeCore {\n fn into_type(self) -> ValueType;\n}\n\nimpl TypeCore for BasicValueType {\n fn into_type(self) -> ValueType {\n ValueType::Basic(self)\n }\n}\n\nimpl TypeCore for StructSchema {\n fn into_type(self) -> ValueType {\n ValueType::Struct(self)\n }\n}\n\nimpl TypeCore for TableSchema {\n fn into_type(self) -> ValueType {\n ValueType::Table(self)\n }\n}\n\npub fn make_output_type(value_type: Type) -> EnrichedValueType {\n EnrichedValueType {\n typ: value_type.into_type(),\n attrs: Default::default(),\n nullable: false,\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct EmptySpec {}\n\n#[macro_export]\nmacro_rules! 
fields_value {\n ($($field:expr), +) => {\n $crate::base::value::FieldValues { fields: std::vec![ $(($field).into()),+ ] }\n };\n}\n\npub struct SchemaBuilderFieldRef(AnalyzedLocalFieldReference);\n\nimpl SchemaBuilderFieldRef {\n pub fn to_field_ref(&self) -> AnalyzedFieldReference {\n AnalyzedFieldReference {\n local: self.0.clone(),\n scope_up_level: 0,\n }\n }\n}\npub struct StructSchemaBuilder<'a> {\n base_fields_idx: Vec,\n target: &'a mut StructSchema,\n}\n\nimpl<'a> StructSchemaBuilder<'a> {\n pub fn new(target: &'a mut StructSchema) -> Self {\n Self {\n base_fields_idx: Vec::new(),\n target,\n }\n }\n\n pub fn _set_description(&mut self, description: impl Into>) {\n self.target.description = Some(description.into());\n }\n\n pub fn add_field(&mut self, field_schema: FieldSchema) -> SchemaBuilderFieldRef {\n let current_idx = self.target.fields.len() as u32;\n Arc::make_mut(&mut self.target.fields).push(field_schema);\n let mut fields_idx = self.base_fields_idx.clone();\n fields_idx.push(current_idx);\n SchemaBuilderFieldRef(AnalyzedLocalFieldReference { fields_idx })\n }\n\n pub fn _add_struct_field(\n &mut self,\n name: impl Into,\n nullable: bool,\n attrs: Arc>,\n ) -> (StructSchemaBuilder<'_>, SchemaBuilderFieldRef) {\n let field_schema = FieldSchema::new(\n name.into(),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(Vec::new()),\n description: None,\n }),\n nullable,\n attrs,\n },\n );\n let local_ref = self.add_field(field_schema);\n let struct_schema = match &mut Arc::make_mut(&mut self.target.fields)\n .last_mut()\n .unwrap()\n .value_type\n .typ\n {\n ValueType::Struct(s) => s,\n _ => unreachable!(),\n };\n (\n StructSchemaBuilder {\n base_fields_idx: local_ref.0.fields_idx.clone(),\n target: struct_schema,\n },\n local_ref,\n )\n }\n}\n"], ["/cocoindex/src/execution/db_tracking_setup.rs", "use crate::prelude::*;\n\nuse crate::setup::{CombinedState, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType};\nuse 
serde::{Deserialize, Serialize};\nuse sqlx::PgPool;\n\npub fn default_tracking_table_name(flow_name: &str) -> String {\n format!(\n \"{}__cocoindex_tracking\",\n utils::db::sanitize_identifier(flow_name)\n )\n}\n\npub const CURRENT_TRACKING_TABLE_VERSION: i32 = 1;\n\nasync fn upgrade_tracking_table(\n pool: &PgPool,\n table_name: &str,\n existing_version_id: i32,\n target_version_id: i32,\n) -> Result<()> {\n if existing_version_id < 1 && target_version_id >= 1 {\n let query = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} (\n source_id INTEGER NOT NULL,\n source_key JSONB NOT NULL,\n\n -- Update in the precommit phase: after evaluation done, before really applying the changes to the target storage.\n max_process_ordinal BIGINT NOT NULL,\n staging_target_keys JSONB NOT NULL,\n memoization_info JSONB,\n\n -- Update after applying the changes to the target storage.\n processed_source_ordinal BIGINT,\n process_logic_fingerprint BYTEA,\n process_ordinal BIGINT,\n process_time_micros BIGINT,\n target_keys JSONB,\n\n PRIMARY KEY (source_id, source_key)\n );\",\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TrackingTableSetupState {\n pub table_name: String,\n pub version_id: i32,\n}\n\n#[derive(Debug)]\npub struct TrackingTableSetupStatus {\n pub desired_state: Option,\n\n pub legacy_table_names: Vec,\n\n pub min_existing_version_id: Option,\n pub source_ids_to_delete: Vec,\n}\n\nimpl TrackingTableSetupStatus {\n pub fn new(\n desired: Option<&TrackingTableSetupState>,\n existing: &CombinedState,\n source_ids_to_delete: Vec,\n ) -> Option {\n let legacy_table_names = existing\n .legacy_values(desired, |v| &v.table_name)\n .into_iter()\n .cloned()\n .collect();\n let min_existing_version_id = existing\n .always_exists()\n .then(|| existing.possible_versions().map(|v| v.version_id).min())\n .flatten();\n if desired.is_some() || min_existing_version_id.is_some() {\n Some(Self 
{\n desired_state: desired.cloned(),\n legacy_table_names,\n min_existing_version_id,\n source_ids_to_delete,\n })\n } else {\n None\n }\n }\n\n pub fn into_setup_info(\n self,\n ) -> ResourceSetupInfo<(), TrackingTableSetupState, TrackingTableSetupStatus> {\n ResourceSetupInfo {\n key: (),\n state: self.desired_state.clone(),\n description: \"Tracking Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for TrackingTableSetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. \",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. 
\",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(_)) => SetupChangeType::Create,\n (Some(min_version_id), Some(desired)) => {\n if min_version_id == desired.version_id && self.legacy_table_names.is_empty() {\n SetupChangeType::NoChange\n } else if min_version_id < desired.version_id {\n SetupChangeType::Update\n } else {\n SetupChangeType::Invalid\n }\n }\n (Some(_), None) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n }\n }\n}\n\nimpl TrackingTableSetupStatus {\n pub async fn apply_change(&self) -> Result<()> {\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n if let Some(desired) = &self.desired_state {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\n \"ALTER TABLE IF EXISTS {} RENAME TO {}\",\n lagacy_name, desired.table_name\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n if self.min_existing_version_id != Some(desired.version_id) {\n upgrade_tracking_table(\n pool,\n &desired.table_name,\n self.min_existing_version_id.unwrap_or(0),\n desired.version_id,\n )\n .await?;\n }\n } else {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\"DROP TABLE IF EXISTS {lagacy_name}\");\n sqlx::query(&query).execute(pool).await?;\n }\n return Ok(());\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/live_updater.rs", "use crate::{execution::stats::UpdateStats, prelude::*};\n\nuse super::stats;\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse tokio::{sync::watch, task::JoinSet, time::MissedTickBehavior};\n\npub struct FlowLiveUpdaterUpdates {\n pub active_sources: Vec,\n pub updated_sources: Vec,\n}\nstruct FlowLiveUpdaterStatus {\n pub active_source_idx: BTreeSet,\n pub source_updates_num: Vec,\n}\n\nstruct UpdateReceiveState {\n 
status_rx: watch::Receiver,\n last_num_source_updates: Vec,\n is_done: bool,\n}\n\npub struct FlowLiveUpdater {\n flow_ctx: Arc,\n join_set: Mutex>>>,\n stats_per_task: Vec>,\n recv_state: tokio::sync::Mutex,\n num_remaining_tasks_rx: watch::Receiver,\n\n // Hold tx to avoid dropping the sender.\n _status_tx: watch::Sender,\n _num_remaining_tasks_tx: watch::Sender,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct FlowLiveUpdaterOptions {\n /// If true, the updater will keep refreshing the index.\n /// Otherwise, it will only apply changes from the source up to the current time.\n pub live_mode: bool,\n\n /// If true, stats will be printed to the console.\n pub print_stats: bool,\n}\n\nconst REPORT_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);\n\nstruct SharedAckFn Result<()>> {\n count: usize,\n ack_fn: Option,\n}\n\nimpl Result<()>> SharedAckFn {\n fn new(count: usize, ack_fn: AckAsyncFn) -> Self {\n Self {\n count,\n ack_fn: Some(ack_fn),\n }\n }\n\n async fn ack(v: &Mutex) -> Result<()> {\n let ack_fn = {\n let mut v = v.lock().unwrap();\n v.count -= 1;\n if v.count > 0 { None } else { v.ack_fn.take() }\n };\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n Ok(())\n }\n}\n\nstruct SourceUpdateTask {\n source_idx: usize,\n\n flow: Arc,\n plan: Arc,\n execution_ctx: Arc>,\n source_update_stats: Arc,\n pool: PgPool,\n options: FlowLiveUpdaterOptions,\n\n status_tx: watch::Sender,\n num_remaining_tasks_tx: watch::Sender,\n}\n\nimpl Drop for SourceUpdateTask {\n fn drop(&mut self) {\n self.status_tx.send_modify(|update| {\n update.active_source_idx.remove(&self.source_idx);\n });\n self.num_remaining_tasks_tx.send_modify(|update| {\n *update -= 1;\n });\n }\n}\n\nimpl SourceUpdateTask {\n async fn run(self) -> Result<()> {\n let source_idx = self.source_idx;\n let source_context = self\n .execution_ctx\n .get_source_indexing_context(&self.flow, source_idx, &self.pool)\n .await?;\n\n let import_op = 
&self.plan.import_ops[source_idx];\n\n let report_stats = |stats: &stats::UpdateStats, kind: &str| {\n self.source_update_stats.merge(stats);\n if self.options.print_stats {\n println!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n } else {\n trace!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n }\n };\n\n let mut futs: Vec>> = Vec::new();\n\n // Deal with change streams.\n if self.options.live_mode {\n if let Some(change_stream) = import_op.executor.change_stream().await? {\n let change_stream_stats = Arc::new(stats::UpdateStats::default());\n futs.push(\n {\n let change_stream_stats = change_stream_stats.clone();\n let pool = self.pool.clone();\n let status_tx = self.status_tx.clone();\n async move {\n let mut change_stream = change_stream;\n let retry_options = retryable::RetryOptions {\n max_retries: None,\n initial_backoff: std::time::Duration::from_secs(5),\n max_backoff: std::time::Duration::from_secs(60),\n };\n loop {\n // Workaround as AsyncFnMut isn't mature yet.\n // Should be changed to use AsyncFnMut once it is.\n let change_stream = tokio::sync::Mutex::new(&mut change_stream);\n let change_msg = retryable::run(\n || async {\n let mut change_stream = change_stream.lock().await;\n change_stream\n .next()\n .await\n .transpose()\n .map_err(retryable::Error::always_retryable)\n },\n &retry_options,\n )\n .await?;\n let change_msg = if let Some(change_msg) = change_msg {\n change_msg\n } else {\n break;\n };\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n let ack_fn = {\n let status_tx = status_tx.clone();\n let update_stats = update_stats.clone();\n let change_stream_stats = change_stream_stats.clone();\n async move || {\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n change_stream_stats.merge(&update_stats);\n }\n if let Some(ack_fn) = change_msg.ack_fn {\n ack_fn().await\n } 
else {\n Ok(())\n }\n }\n };\n let shared_ack_fn = Arc::new(Mutex::new(SharedAckFn::new(\n change_msg.changes.iter().len(),\n ack_fn,\n )));\n for change in change_msg.changes {\n let shared_ack_fn = shared_ack_fn.clone();\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n tokio::spawn(source_context.clone().process_source_key(\n change.key,\n change.data,\n update_stats.clone(),\n concur_permit,\n Some(move || async move {\n SharedAckFn::ack(&shared_ack_fn).await\n }),\n pool.clone(),\n ));\n }\n }\n Ok(())\n }\n }\n .boxed(),\n );\n\n futs.push(\n async move {\n let mut interval = tokio::time::interval(REPORT_INTERVAL);\n let mut last_change_stream_stats: UpdateStats =\n change_stream_stats.as_ref().clone();\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n let curr_change_stream_stats = change_stream_stats.as_ref().clone();\n let delta = curr_change_stream_stats.delta(&last_change_stream_stats);\n if delta.has_any_change() {\n report_stats(&delta, \"change stream\");\n last_change_stream_stats = curr_change_stream_stats;\n }\n }\n }\n .boxed(),\n );\n }\n }\n\n // The main update loop.\n futs.push({\n let status_tx = self.status_tx.clone();\n let pool = self.pool.clone();\n let live_mode = self.options.live_mode;\n async move {\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"batch update\");\n\n if let (true, Some(refresh_interval)) =\n (live_mode, import_op.refresh_options.refresh_interval)\n {\n let mut interval = tokio::time::interval(refresh_interval);\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n\n let update_stats = 
Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"interval refresh\");\n }\n }\n Ok(())\n }\n .boxed()\n });\n\n let join_result = try_join_all(futs).await;\n if let Err(err) = join_result {\n error!(\"Error in source `{}`: {:?}\", import_op.name, err);\n return Err(err);\n }\n Ok(())\n }\n}\n\nimpl FlowLiveUpdater {\n pub async fn start(\n flow_ctx: Arc,\n pool: &PgPool,\n options: FlowLiveUpdaterOptions,\n ) -> Result {\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let execution_ctx = Arc::new(flow_ctx.use_owned_execution_ctx().await?);\n\n let (status_tx, status_rx) = watch::channel(FlowLiveUpdaterStatus {\n active_source_idx: BTreeSet::from_iter(0..plan.import_ops.len()),\n source_updates_num: vec![0; plan.import_ops.len()],\n });\n\n let (num_remaining_tasks_tx, num_remaining_tasks_rx) =\n watch::channel(plan.import_ops.len());\n\n let mut join_set = JoinSet::new();\n let mut stats_per_task = Vec::new();\n\n for source_idx in 0..plan.import_ops.len() {\n let source_update_stats = Arc::new(stats::UpdateStats::default());\n let source_update_task = SourceUpdateTask {\n source_idx,\n flow: flow_ctx.flow.clone(),\n plan: plan.clone(),\n execution_ctx: execution_ctx.clone(),\n source_update_stats: source_update_stats.clone(),\n pool: pool.clone(),\n options: options.clone(),\n status_tx: status_tx.clone(),\n num_remaining_tasks_tx: num_remaining_tasks_tx.clone(),\n };\n join_set.spawn(source_update_task.run());\n stats_per_task.push(source_update_stats);\n }\n Ok(Self {\n flow_ctx,\n join_set: Mutex::new(Some(join_set)),\n stats_per_task,\n recv_state: tokio::sync::Mutex::new(UpdateReceiveState {\n status_rx,\n last_num_source_updates: vec![0; plan.import_ops.len()],\n is_done: false,\n }),\n num_remaining_tasks_rx,\n\n _status_tx: status_tx,\n 
_num_remaining_tasks_tx: num_remaining_tasks_tx,\n })\n }\n\n pub async fn wait(&self) -> Result<()> {\n {\n let mut rx = self.num_remaining_tasks_rx.clone();\n rx.wait_for(|v| *v == 0).await?;\n }\n\n let Some(mut join_set) = self.join_set.lock().unwrap().take() else {\n return Ok(());\n };\n while let Some(task_result) = join_set.join_next().await {\n match task_result {\n Ok(Ok(_)) => {}\n Ok(Err(err)) => {\n return Err(err);\n }\n Err(err) if err.is_cancelled() => {}\n Err(err) => {\n return Err(err.into());\n }\n }\n }\n Ok(())\n }\n\n pub fn abort(&self) {\n let mut join_set = self.join_set.lock().unwrap();\n if let Some(join_set) = &mut *join_set {\n join_set.abort_all();\n }\n }\n\n pub fn index_update_info(&self) -> stats::IndexUpdateInfo {\n stats::IndexUpdateInfo {\n sources: std::iter::zip(\n self.flow_ctx.flow.flow_instance.import_ops.iter(),\n self.stats_per_task.iter(),\n )\n .map(|(import_op, stats)| stats::SourceUpdateInfo {\n source_name: import_op.name.clone(),\n stats: stats.as_ref().clone(),\n })\n .collect(),\n }\n }\n\n pub async fn next_status_updates(&self) -> Result {\n let mut recv_state = self.recv_state.lock().await;\n let recv_state = &mut *recv_state;\n\n if recv_state.is_done {\n return Ok(FlowLiveUpdaterUpdates {\n active_sources: vec![],\n updated_sources: vec![],\n });\n }\n\n recv_state.status_rx.changed().await?;\n let status = recv_state.status_rx.borrow_and_update();\n let updates = FlowLiveUpdaterUpdates {\n active_sources: status\n .active_source_idx\n .iter()\n .map(|idx| {\n self.flow_ctx.flow.flow_instance.import_ops[*idx]\n .name\n .clone()\n })\n .collect(),\n updated_sources: status\n .source_updates_num\n .iter()\n .enumerate()\n .filter_map(|(idx, num_updates)| {\n if num_updates > &recv_state.last_num_source_updates[idx] {\n Some(\n self.flow_ctx.flow.flow_instance.import_ops[idx]\n .name\n .clone(),\n )\n } else {\n None\n }\n })\n .collect(),\n };\n recv_state.last_num_source_updates = 
status.source_updates_num.clone();\n if status.active_source_idx.is_empty() {\n recv_state.is_done = true;\n }\n Ok(updates)\n }\n}\n"], ["/cocoindex/src/utils/concur_control.rs", "use crate::prelude::*;\n\nuse tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore};\n\nstruct WeightedSemaphore {\n downscale_factor: u8,\n downscaled_quota: u32,\n sem: Arc,\n}\n\nimpl WeightedSemaphore {\n pub fn new(quota: usize) -> Self {\n let mut downscale_factor = 0;\n let mut downscaled_quota = quota;\n while downscaled_quota > u32::MAX as usize {\n downscaled_quota >>= 1;\n downscale_factor += 1;\n }\n let sem = Arc::new(Semaphore::new(downscaled_quota));\n Self {\n downscaled_quota: downscaled_quota as u32,\n downscale_factor,\n sem,\n }\n }\n\n async fn acquire_reservation(&self) -> Result {\n self.sem.clone().acquire_owned().await\n }\n\n async fn acquire(\n &self,\n weight: usize,\n reserved: bool,\n ) -> Result, AcquireError> {\n let downscaled_weight = (weight >> self.downscale_factor) as u32;\n let capped_weight = downscaled_weight.min(self.downscaled_quota);\n let reserved_weight = if reserved { 1 } else { 0 };\n if reserved_weight >= capped_weight {\n return Ok(None);\n }\n Ok(Some(\n self.sem\n .clone()\n .acquire_many_owned(capped_weight - reserved_weight)\n .await?,\n ))\n }\n}\n\npub struct Options {\n pub max_inflight_rows: Option,\n pub max_inflight_bytes: Option,\n}\n\npub struct ConcurrencyControllerPermit {\n _inflight_count_permit: Option,\n _inflight_bytes_permit: Option,\n}\n\npub struct ConcurrencyController {\n inflight_count_sem: Option>,\n inflight_bytes_sem: Option,\n}\n\npub static BYTES_UNKNOWN_YET: Option usize> = None;\n\nimpl ConcurrencyController {\n pub fn new(exec_options: &Options) -> Self {\n Self {\n inflight_count_sem: exec_options\n .max_inflight_rows\n .map(|max| Arc::new(Semaphore::new(max))),\n inflight_bytes_sem: exec_options.max_inflight_bytes.map(WeightedSemaphore::new),\n }\n }\n\n /// If `bytes_fn` is `None`, it means the 
number of bytes is not known yet.\n /// The controller will reserve a minimum number of bytes.\n /// The caller should call `acquire_bytes_with_reservation` with the actual number of bytes later.\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let inflight_count_permit = if let Some(sem) = &self.inflight_count_sem {\n Some(sem.clone().acquire_owned().await?)\n } else {\n None\n };\n let inflight_bytes_permit = if let Some(sem) = &self.inflight_bytes_sem {\n if let Some(bytes_fn) = bytes_fn {\n sem.acquire(bytes_fn(), false).await?\n } else {\n Some(sem.acquire_reservation().await?)\n }\n } else {\n None\n };\n Ok(ConcurrencyControllerPermit {\n _inflight_count_permit: inflight_count_permit,\n _inflight_bytes_permit: inflight_bytes_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result, AcquireError> {\n if let Some(sem) = &self.inflight_bytes_sem {\n sem.acquire(bytes_fn(), true).await\n } else {\n Ok(None)\n }\n }\n}\n\npub struct CombinedConcurrencyControllerPermit {\n _permit: ConcurrencyControllerPermit,\n _global_permit: ConcurrencyControllerPermit,\n}\n\npub struct CombinedConcurrencyController {\n controller: ConcurrencyController,\n global_controller: Arc,\n needs_num_bytes: bool,\n}\n\nimpl CombinedConcurrencyController {\n pub fn new(exec_options: &Options, global_controller: Arc) -> Self {\n Self {\n controller: ConcurrencyController::new(exec_options),\n needs_num_bytes: exec_options.max_inflight_bytes.is_some()\n || global_controller.inflight_bytes_sem.is_some(),\n global_controller,\n }\n }\n\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let num_bytes_fn = if let Some(bytes_fn) = bytes_fn\n && self.needs_num_bytes\n {\n let num_bytes = bytes_fn();\n Some(move || num_bytes)\n } else {\n None\n };\n\n let permit = self.controller.acquire(num_bytes_fn).await?;\n let global_permit = 
self.global_controller.acquire(num_bytes_fn).await?;\n Ok(CombinedConcurrencyControllerPermit {\n _permit: permit,\n _global_permit: global_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result<(Option, Option), AcquireError> {\n let num_bytes = bytes_fn();\n let permit = self\n .controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n let global_permit = self\n .global_controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n Ok((permit, global_permit))\n }\n}\n"], ["/cocoindex/src/utils/retryable.rs", "use log::trace;\nuse std::{future::Future, time::Duration};\n\npub trait IsRetryable {\n fn is_retryable(&self) -> bool;\n}\n\npub struct Error {\n error: anyhow::Error,\n is_retryable: bool,\n}\n\nimpl std::fmt::Display for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.error, f)\n }\n}\n\nimpl std::fmt::Debug for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Debug::fmt(&self.error, f)\n }\n}\n\nimpl IsRetryable for Error {\n fn is_retryable(&self) -> bool {\n self.is_retryable\n }\n}\n\nimpl IsRetryable for reqwest::Error {\n fn is_retryable(&self) -> bool {\n self.status() == Some(reqwest::StatusCode::TOO_MANY_REQUESTS)\n }\n}\n\nimpl Error {\n pub fn always_retryable(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: true,\n }\n }\n}\n\nimpl From for Error {\n fn from(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: false,\n }\n }\n}\n\nimpl From for anyhow::Error {\n fn from(val: Error) -> Self {\n val.error\n }\n}\n\nimpl From for Error {\n fn from(error: E) -> Self {\n Self {\n is_retryable: error.is_retryable(),\n error: anyhow::Error::new(error),\n }\n }\n}\n\npub type Result = std::result::Result;\n\n#[allow(non_snake_case)]\npub fn Ok(value: T) -> Result {\n Result::Ok(value)\n}\n\npub struct RetryOptions {\n 
pub max_retries: Option,\n pub initial_backoff: Duration,\n pub max_backoff: Duration,\n}\n\nimpl Default for RetryOptions {\n fn default() -> Self {\n Self {\n max_retries: Some(10),\n initial_backoff: Duration::from_millis(100),\n max_backoff: Duration::from_secs(10),\n }\n }\n}\n\npub static HEAVY_LOADED_OPTIONS: RetryOptions = RetryOptions {\n max_retries: Some(10),\n initial_backoff: Duration::from_secs(1),\n max_backoff: Duration::from_secs(60),\n};\n\npub async fn run<\n Ok,\n Err: std::fmt::Display + IsRetryable,\n Fut: Future>,\n F: Fn() -> Fut,\n>(\n f: F,\n options: &RetryOptions,\n) -> Result {\n let mut retries = 0;\n let mut backoff = options.initial_backoff;\n\n loop {\n match f().await {\n Result::Ok(result) => return Result::Ok(result),\n Result::Err(err) => {\n if !err.is_retryable()\n || options\n .max_retries\n .is_some_and(|max_retries| retries >= max_retries)\n {\n return Result::Err(err);\n }\n retries += 1;\n trace!(\n \"Will retry #{} in {}ms for error: {}\",\n retries,\n backoff.as_millis(),\n err\n );\n tokio::time::sleep(backoff).await;\n if backoff < options.max_backoff {\n backoff = std::cmp::min(\n Duration::from_micros(\n (backoff.as_micros() * rand::random_range(1618..=2000) / 1000) as u64,\n ),\n options.max_backoff,\n );\n }\n }\n }\n }\n}\n"], ["/cocoindex/src/execution/source_indexer.rs", "use crate::{\n prelude::*,\n service::error::{SharedError, SharedResult, SharedResultExt},\n};\n\nuse futures::future::Ready;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, hash_map};\nuse tokio::{sync::Semaphore, task::JoinSet};\n\nuse super::{\n db_tracking,\n evaluator::SourceRowEvaluationContext,\n row_indexer::{self, SkippedOr, SourceVersion},\n stats,\n};\n\nuse crate::ops::interface;\nstruct SourceRowIndexingState {\n source_version: SourceVersion,\n processing_sem: Arc,\n touched_generation: usize,\n}\n\nimpl Default for SourceRowIndexingState {\n fn default() -> Self {\n Self {\n source_version: SourceVersion::default(),\n 
processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: 0,\n }\n }\n}\n\nstruct SourceIndexingState {\n rows: HashMap,\n scan_generation: usize,\n}\n\npub struct SourceIndexingContext {\n flow: Arc,\n source_idx: usize,\n pending_update: Mutex>>>>,\n update_sem: Semaphore,\n state: Mutex,\n setup_execution_ctx: Arc,\n}\n\npub const NO_ACK: Option Ready>> = None;\n\nimpl SourceIndexingContext {\n pub async fn load(\n flow: Arc,\n source_idx: usize,\n setup_execution_ctx: Arc,\n pool: &PgPool,\n ) -> Result {\n let plan = flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[source_idx];\n let mut list_state = db_tracking::ListTrackedSourceKeyMetadataState::new();\n let mut rows = HashMap::new();\n let scan_generation = 0;\n {\n let mut key_metadata_stream = list_state.list(\n setup_execution_ctx.import_ops[source_idx].source_id,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n while let Some(key_metadata) = key_metadata_stream.next().await {\n let key_metadata = key_metadata?;\n let source_key = value::Value::::from_json(\n key_metadata.source_key,\n &import_op.primary_key_type,\n )?\n .into_key()?;\n rows.insert(\n source_key,\n SourceRowIndexingState {\n source_version: SourceVersion::from_stored(\n key_metadata.processed_source_ordinal,\n &key_metadata.process_logic_fingerprint,\n plan.logic_fingerprint,\n ),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: scan_generation,\n },\n );\n }\n }\n Ok(Self {\n flow,\n source_idx,\n state: Mutex::new(SourceIndexingState {\n rows,\n scan_generation,\n }),\n pending_update: Mutex::new(None),\n update_sem: Semaphore::new(1),\n setup_execution_ctx,\n })\n }\n\n pub async fn process_source_key<\n AckFut: Future> + Send + 'static,\n AckFn: FnOnce() -> AckFut,\n >(\n self: Arc,\n key: value::KeyValue,\n source_data: Option,\n update_stats: Arc,\n _concur_permit: concur_control::CombinedConcurrencyControllerPermit,\n ack_fn: Option,\n pool: PgPool,\n ) {\n let 
process = async {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let schema = &self.flow.data_schema;\n let source_data = match source_data {\n Some(source_data) => source_data,\n None => import_op\n .executor\n .get_value(\n &key,\n &interface::SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: true,\n },\n )\n .await?\n .try_into()?,\n };\n\n let source_version = SourceVersion::from_current_data(&source_data);\n let processing_sem = {\n let mut state = self.state.lock().unwrap();\n let touched_generation = state.scan_generation;\n match state.rows.entry(key.clone()) {\n hash_map::Entry::Occupied(mut entry) => {\n if entry\n .get()\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n return anyhow::Ok(());\n }\n let sem = entry.get().processing_sem.clone();\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n entry.remove();\n } else {\n entry.get_mut().source_version = source_version.clone();\n }\n sem\n }\n hash_map::Entry::Vacant(entry) => {\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n update_stats.num_no_change.inc(1);\n return anyhow::Ok(());\n }\n let new_entry = SourceRowIndexingState {\n source_version: source_version.clone(),\n touched_generation,\n ..Default::default()\n };\n let sem = new_entry.processing_sem.clone();\n entry.insert(new_entry);\n sem\n }\n }\n };\n\n let _processing_permit = processing_sem.acquire().await?;\n let result = row_indexer::update_source_row(\n &SourceRowEvaluationContext {\n plan: &plan,\n import_op,\n schema,\n key: &key,\n import_op_idx: self.source_idx,\n },\n &self.setup_execution_ctx,\n source_data.value,\n &source_version,\n &pool,\n &update_stats,\n )\n .await?;\n let target_source_version = if let SkippedOr::Skipped(existing_source_version) = result\n {\n Some(existing_source_version)\n } else if source_version.kind == 
row_indexer::SourceVersionKind::NonExistence {\n Some(source_version)\n } else {\n None\n };\n if let Some(target_source_version) = target_source_version {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let entry = state.rows.entry(key.clone());\n match entry {\n hash_map::Entry::Occupied(mut entry) => {\n if !entry\n .get()\n .source_version\n .should_skip(&target_source_version, None)\n {\n if target_source_version.kind\n == row_indexer::SourceVersionKind::NonExistence\n {\n entry.remove();\n } else {\n let mut_entry = entry.get_mut();\n mut_entry.source_version = target_source_version;\n mut_entry.touched_generation = scan_generation;\n }\n }\n }\n hash_map::Entry::Vacant(entry) => {\n if target_source_version.kind\n != row_indexer::SourceVersionKind::NonExistence\n {\n entry.insert(SourceRowIndexingState {\n source_version: target_source_version,\n touched_generation: scan_generation,\n ..Default::default()\n });\n }\n }\n }\n }\n anyhow::Ok(())\n };\n let process_and_ack = async {\n process.await?;\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n anyhow::Ok(())\n };\n if let Err(e) = process_and_ack.await {\n update_stats.num_errors.inc(1);\n error!(\n \"{:?}\",\n e.context(format!(\n \"Error in processing row from source `{source}` with key: {key}\",\n source = self.flow.flow_instance.import_ops[self.source_idx].name\n ))\n );\n }\n }\n\n pub async fn update(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let pending_update_fut = {\n let mut pending_update = self.pending_update.lock().unwrap();\n if let Some(pending_update_fut) = &*pending_update {\n pending_update_fut.clone()\n } else {\n let slf = self.clone();\n let pool = pool.clone();\n let update_stats = update_stats.clone();\n let task = tokio::spawn(async move {\n {\n let _permit = slf.update_sem.acquire().await?;\n {\n let mut pending_update = slf.pending_update.lock().unwrap();\n *pending_update = None;\n }\n 
slf.update_once(&pool, &update_stats).await?;\n }\n anyhow::Ok(())\n });\n let pending_update_fut = async move {\n task.await\n .map_err(SharedError::from)?\n .map_err(SharedError::new)\n }\n .boxed()\n .shared();\n *pending_update = Some(pending_update_fut.clone());\n pending_update_fut\n }\n };\n pending_update_fut.await.std_result()?;\n Ok(())\n }\n\n async fn update_once(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let mut rows_stream = import_op\n .executor\n .list(&interface::SourceExecutorListOptions {\n include_ordinal: true,\n });\n let mut join_set = JoinSet::new();\n let scan_generation = {\n let mut state = self.state.lock().unwrap();\n state.scan_generation += 1;\n state.scan_generation\n };\n while let Some(row) = rows_stream.next().await {\n for row in row? {\n let source_version = SourceVersion::from_current_with_ordinal(\n row.ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is not available\"))?,\n );\n {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let row_state = state.rows.entry(row.key.clone()).or_default();\n row_state.touched_generation = scan_generation;\n if row_state\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n continue;\n }\n }\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n join_set.spawn(self.clone().process_source_key(\n row.key,\n None,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n let deleted_key_versions = {\n let mut deleted_key_versions = Vec::new();\n let state = self.state.lock().unwrap();\n for (key, row_state) in state.rows.iter() {\n if 
row_state.touched_generation < scan_generation {\n deleted_key_versions.push((key.clone(), row_state.source_version.ordinal));\n }\n }\n deleted_key_versions\n };\n for (key, source_ordinal) in deleted_key_versions {\n // If the source ordinal is unavailable, call without source ordinal so that another polling will be triggered to avoid out-of-order.\n let source_data = source_ordinal\n .is_available()\n .then(|| interface::SourceData {\n value: interface::SourceValue::NonExistence,\n ordinal: source_ordinal,\n });\n let concur_permit = import_op.concurrency_controller.acquire(Some(|| 0)).await?;\n join_set.spawn(self.clone().process_source_key(\n key,\n source_data,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/builder/exec_ctx.rs", "use crate::prelude::*;\n\nuse crate::execution::db_tracking_setup;\nuse crate::ops::get_executor_factory;\nuse crate::ops::interface::SetupStateCompatibility;\n\npub struct ImportOpExecutionContext {\n pub source_id: i32,\n}\n\npub struct ExportOpExecutionContext {\n pub target_id: i32,\n}\n\npub struct FlowSetupExecutionContext {\n pub setup_state: setup::FlowSetupState,\n pub import_ops: Vec,\n pub export_ops: Vec,\n}\n\npub struct AnalyzedTargetSetupState {\n pub target_kind: String,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n pub setup_by_user: bool,\n}\n\npub struct AnalyzedSetupState {\n pub targets: Vec,\n pub declarations: Vec,\n}\n\nfn build_import_op_exec_ctx(\n import_field_name: &spec::FieldName,\n import_op_output_type: &schema::EnrichedValueType,\n existing_source_states: Option<&Vec<&setup::SourceSetupState>>,\n metadata: &mut setup::FlowSetupMetadata,\n) -> Result {\n let key_schema_no_attrs = import_op_output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source 
must produce a type with key\"))?\n .typ\n .without_attrs();\n\n let existing_source_ids = existing_source_states\n .iter()\n .flat_map(|v| v.iter())\n .filter_map(|state| {\n if state.key_schema == key_schema_no_attrs {\n Some(state.source_id)\n } else {\n None\n }\n })\n .collect::>();\n let source_id = if existing_source_ids.len() == 1 {\n existing_source_ids.into_iter().next().unwrap()\n } else {\n if existing_source_ids.len() > 1 {\n warn!(\"Multiple source states with the same key schema found\");\n }\n metadata.last_source_id += 1;\n metadata.last_source_id\n };\n metadata.sources.insert(\n import_field_name.clone(),\n setup::SourceSetupState {\n source_id,\n key_schema: key_schema_no_attrs,\n },\n );\n Ok(ImportOpExecutionContext { source_id })\n}\n\nfn build_target_id(\n analyzed_target_ss: &AnalyzedTargetSetupState,\n existing_target_states: &HashMap<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>,\n flow_setup_state: &mut setup::FlowSetupState,\n) -> Result {\n let interface::ExecutorFactory::ExportTarget(target_factory) =\n get_executor_factory(&analyzed_target_ss.target_kind)?\n else {\n api_bail!(\n \"`{}` is not a export target op\",\n analyzed_target_ss.target_kind\n )\n };\n\n let resource_id = setup::ResourceIdentifier {\n key: analyzed_target_ss.setup_key.clone(),\n target_kind: analyzed_target_ss.target_kind.clone(),\n };\n let existing_target_states = existing_target_states.get(&resource_id);\n let mut compatible_target_ids = HashSet::>::new();\n let mut reusable_schema_version_ids = HashSet::>::new();\n for existing_state in existing_target_states.iter().flat_map(|v| v.iter()) {\n let compatibility =\n if analyzed_target_ss.setup_by_user == existing_state.common.setup_by_user {\n target_factory.check_state_compatibility(\n &analyzed_target_ss.desired_setup_state,\n &existing_state.state,\n )?\n } else {\n SetupStateCompatibility::NotCompatible\n };\n let compatible_target_id = if compatibility != 
SetupStateCompatibility::NotCompatible {\n reusable_schema_version_ids.insert(\n (compatibility == SetupStateCompatibility::Compatible)\n .then_some(existing_state.common.schema_version_id),\n );\n Some(existing_state.common.target_id)\n } else {\n None\n };\n compatible_target_ids.insert(compatible_target_id);\n }\n\n let target_id = if compatible_target_ids.len() == 1 {\n compatible_target_ids.into_iter().next().flatten()\n } else {\n if compatible_target_ids.len() > 1 {\n warn!(\"Multiple target states with the same key schema found\");\n }\n None\n };\n let target_id = target_id.unwrap_or_else(|| {\n flow_setup_state.metadata.last_target_id += 1;\n flow_setup_state.metadata.last_target_id\n });\n let max_schema_version_id = existing_target_states\n .iter()\n .flat_map(|v| v.iter())\n .map(|s| s.common.max_schema_version_id)\n .max()\n .unwrap_or(0);\n let schema_version_id = if reusable_schema_version_ids.len() == 1 {\n reusable_schema_version_ids\n .into_iter()\n .next()\n .unwrap()\n .unwrap_or(max_schema_version_id + 1)\n } else {\n max_schema_version_id + 1\n };\n match flow_setup_state.targets.entry(resource_id) {\n indexmap::map::Entry::Occupied(entry) => {\n api_bail!(\n \"Target resource already exists: kind = {}, key = {}\",\n entry.key().target_kind,\n entry.key().key\n );\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(setup::TargetSetupState {\n common: setup::TargetSetupStateCommon {\n target_id,\n schema_version_id,\n max_schema_version_id: max_schema_version_id.max(schema_version_id),\n setup_by_user: analyzed_target_ss.setup_by_user,\n },\n state: analyzed_target_ss.desired_setup_state.clone(),\n });\n }\n }\n Ok(target_id)\n}\n\npub fn build_flow_setup_execution_context(\n flow_inst: &spec::FlowInstanceSpec,\n data_schema: &schema::FlowSchema,\n analyzed_ss: &AnalyzedSetupState,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result {\n let existing_metadata_versions = || {\n existing_flow_ss\n .iter()\n 
.flat_map(|flow_ss| flow_ss.metadata.possible_versions())\n };\n\n let mut source_states_by_name = HashMap::<&str, Vec<&setup::SourceSetupState>>::new();\n for metadata_version in existing_metadata_versions() {\n for (source_name, state) in metadata_version.sources.iter() {\n source_states_by_name\n .entry(source_name.as_str())\n .or_default()\n .push(state);\n }\n }\n\n let mut target_states_by_name_type =\n HashMap::<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>::new();\n for metadata_version in existing_flow_ss.iter() {\n for (resource_id, target) in metadata_version.targets.iter() {\n target_states_by_name_type\n .entry(resource_id)\n .or_default()\n .extend(target.possible_versions());\n }\n }\n\n let mut setup_state = setup::FlowSetupState:: {\n seen_flow_metadata_version: existing_flow_ss\n .and_then(|flow_ss| flow_ss.seen_flow_metadata_version),\n metadata: setup::FlowSetupMetadata {\n last_source_id: existing_metadata_versions()\n .map(|metadata| metadata.last_source_id)\n .max()\n .unwrap_or(0),\n last_target_id: existing_metadata_versions()\n .map(|metadata| metadata.last_target_id)\n .max()\n .unwrap_or(0),\n sources: BTreeMap::new(),\n },\n tracking_table: db_tracking_setup::TrackingTableSetupState {\n table_name: existing_flow_ss\n .and_then(|flow_ss| {\n flow_ss\n .tracking_table\n .current\n .as_ref()\n .map(|v| v.table_name.clone())\n })\n .unwrap_or_else(|| db_tracking_setup::default_tracking_table_name(&flow_inst.name)),\n version_id: db_tracking_setup::CURRENT_TRACKING_TABLE_VERSION,\n },\n targets: IndexMap::new(),\n };\n\n let import_op_exec_ctx = flow_inst\n .import_ops\n .iter()\n .map(|import_op| {\n let output_type = data_schema\n .root_op_scope\n .op_output_types\n .get(&import_op.name)\n .ok_or_else(invariance_violation)?;\n build_import_op_exec_ctx(\n &import_op.name,\n output_type,\n source_states_by_name.get(&import_op.name.as_str()),\n &mut setup_state.metadata,\n )\n })\n .collect::>>()?;\n\n let export_op_exec_ctx = 
analyzed_ss\n .targets\n .iter()\n .map(|analyzed_target_ss| {\n let target_id = build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n Ok(ExportOpExecutionContext { target_id })\n })\n .collect::>>()?;\n\n for analyzed_target_ss in analyzed_ss.declarations.iter() {\n build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n }\n\n Ok(FlowSetupExecutionContext {\n setup_state,\n import_ops: import_op_exec_ctx,\n export_ops: export_op_exec_ctx,\n })\n}\n"], ["/cocoindex/src/ops/registration.rs", "use super::{\n factory_bases::*, functions, registry::ExecutorFactoryRegistry, sdk::ExecutorFactory, sources,\n targets,\n};\nuse anyhow::Result;\nuse std::sync::{LazyLock, RwLock};\n\nfn register_executor_factories(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n let reqwest_client = reqwest::Client::new();\n\n sources::local_file::Factory.register(registry)?;\n sources::google_drive::Factory.register(registry)?;\n sources::amazon_s3::Factory.register(registry)?;\n sources::azure_blob::Factory.register(registry)?;\n\n functions::parse_json::Factory.register(registry)?;\n functions::split_recursively::register(registry)?;\n functions::extract_by_llm::Factory.register(registry)?;\n functions::embed_text::register(registry)?;\n\n targets::postgres::Factory::default().register(registry)?;\n targets::qdrant::register(registry)?;\n targets::kuzu::register(registry, reqwest_client)?;\n\n targets::neo4j::Factory::new().register(registry)?;\n\n Ok(())\n}\n\nstatic EXECUTOR_FACTORY_REGISTRY: LazyLock> = LazyLock::new(|| {\n let mut registry = ExecutorFactoryRegistry::new();\n register_executor_factories(&mut registry).expect(\"Failed to register executor factories\");\n RwLock::new(registry)\n});\n\npub fn get_optional_executor_factory(kind: &str) -> Option {\n let registry = EXECUTOR_FACTORY_REGISTRY.read().unwrap();\n registry.get(kind).cloned()\n}\n\npub fn get_executor_factory(kind: &str) 
-> Result {\n get_optional_executor_factory(kind)\n .ok_or_else(|| anyhow::anyhow!(\"Executor factory not found for op kind: {}\", kind))\n}\n\npub fn register_factory(name: String, factory: ExecutorFactory) -> Result<()> {\n let mut registry = EXECUTOR_FACTORY_REGISTRY.write().unwrap();\n registry.register(name, factory)\n}\n"], ["/cocoindex/src/utils/immutable.rs", "#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]\npub enum RefList<'a, T> {\n #[default]\n Nil,\n\n Cons(T, &'a RefList<'a, T>),\n}\n\nimpl<'a, T> RefList<'a, T> {\n pub fn prepend(&'a self, head: T) -> Self {\n Self::Cons(head, self)\n }\n\n pub fn iter(&'a self) -> impl Iterator {\n self\n }\n\n pub fn head(&'a self) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, _) => Some(head),\n }\n }\n\n pub fn headn(&'a self, n: usize) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n if n == 0 {\n Some(head)\n } else {\n tail.headn(n - 1)\n }\n }\n }\n }\n\n pub fn tail(&'a self) -> Option<&'a RefList<'a, T>> {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => Some(tail),\n }\n }\n\n pub fn tailn(&'a self, n: usize) -> Option<&'a RefList<'a, T>> {\n if n == 0 {\n Some(self)\n } else {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => tail.tailn(n - 1),\n }\n }\n }\n}\n\nimpl<'a, T> Iterator for &'a RefList<'a, T> {\n type Item = &'a T;\n\n fn next(&mut self) -> Option {\n let current = *self;\n match current {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n *self = *tail;\n Some(head)\n }\n }\n }\n}\n"], ["/cocoindex/src/builder/analyzed_flow.rs", "use crate::{ops::interface::FlowInstanceContext, prelude::*};\n\nuse super::{analyzer, plan};\nuse crate::service::error::{SharedError, SharedResultExt, shared_ok};\n\npub struct AnalyzedFlow {\n pub flow_instance: spec::FlowInstanceSpec,\n pub data_schema: schema::FlowSchema,\n pub setup_state: exec_ctx::AnalyzedSetupState,\n\n pub 
flow_instance_ctx: Arc,\n\n /// It's None if the flow is not up to date\n pub execution_plan: Shared, SharedError>>>,\n}\n\nimpl AnalyzedFlow {\n pub async fn from_flow_instance(\n flow_instance: crate::base::spec::FlowInstanceSpec,\n flow_instance_ctx: Arc,\n ) -> Result {\n let (data_schema, setup_state, execution_plan_fut) =\n analyzer::analyze_flow(&flow_instance, flow_instance_ctx.clone()).await?;\n let execution_plan = async move {\n shared_ok(Arc::new(\n execution_plan_fut.await.map_err(SharedError::new)?,\n ))\n }\n .boxed()\n .shared();\n let result = Self {\n flow_instance,\n data_schema,\n setup_state,\n flow_instance_ctx,\n execution_plan,\n };\n Ok(result)\n }\n\n pub async fn get_execution_plan(&self) -> Result> {\n let execution_plan = self.execution_plan.clone().await.std_result()?;\n Ok(execution_plan)\n }\n}\n\npub struct AnalyzedTransientFlow {\n pub transient_flow_instance: spec::TransientFlowSpec,\n pub data_schema: schema::FlowSchema,\n pub execution_plan: plan::TransientExecutionPlan,\n pub output_type: schema::EnrichedValueType,\n}\n\nimpl AnalyzedTransientFlow {\n pub async fn from_transient_flow(\n transient_flow: spec::TransientFlowSpec,\n py_exec_ctx: Option,\n ) -> Result {\n let ctx = analyzer::build_flow_instance_context(&transient_flow.name, py_exec_ctx);\n let (output_type, data_schema, execution_plan_fut) =\n analyzer::analyze_transient_flow(&transient_flow, ctx).await?;\n Ok(Self {\n transient_flow_instance: transient_flow,\n data_schema,\n execution_plan: execution_plan_fut.await?,\n output_type,\n })\n }\n}\n"], ["/cocoindex/src/server.rs", "use crate::prelude::*;\n\nuse crate::{lib_context::LibContext, service};\nuse axum::{Router, routing};\nuse tower::ServiceBuilder;\nuse tower_http::{\n cors::{AllowOrigin, CorsLayer},\n trace::TraceLayer,\n};\n\n#[derive(Deserialize, Debug)]\npub struct ServerSettings {\n pub address: String,\n #[serde(default)]\n pub cors_origins: Vec,\n}\n\n/// Initialize the server and return a future 
that will actually handle requests.\npub async fn init_server(\n lib_context: Arc,\n settings: ServerSettings,\n) -> Result> {\n let mut cors = CorsLayer::default();\n if !settings.cors_origins.is_empty() {\n let origins: Vec<_> = settings\n .cors_origins\n .iter()\n .map(|origin| origin.parse())\n .collect::>()?;\n cors = cors\n .allow_origin(AllowOrigin::list(origins))\n .allow_methods([\n axum::http::Method::GET,\n axum::http::Method::POST,\n axum::http::Method::DELETE,\n ])\n .allow_headers([axum::http::header::CONTENT_TYPE]);\n }\n let app = Router::new()\n .route(\n \"/cocoindex\",\n routing::get(|| async { \"CocoIndex is running!\" }),\n )\n .nest(\n \"/cocoindex/api\",\n Router::new()\n .route(\"/flows\", routing::get(service::flows::list_flows))\n .route(\n \"/flows/{flowInstName}\",\n routing::get(service::flows::get_flow),\n )\n .route(\n \"/flows/{flowInstName}/schema\",\n routing::get(service::flows::get_flow_schema),\n )\n .route(\n \"/flows/{flowInstName}/keys\",\n routing::get(service::flows::get_keys),\n )\n .route(\n \"/flows/{flowInstName}/data\",\n routing::get(service::flows::evaluate_data),\n )\n .route(\n \"/flows/{flowInstName}/rowStatus\",\n routing::get(service::flows::get_row_indexing_status),\n )\n .route(\n \"/flows/{flowInstName}/update\",\n routing::post(service::flows::update),\n )\n .layer(\n ServiceBuilder::new()\n .layer(TraceLayer::new_for_http())\n .layer(cors),\n )\n .with_state(lib_context.clone()),\n );\n\n let listener = tokio::net::TcpListener::bind(&settings.address)\n .await\n .context(format!(\"Failed to bind to address: {}\", settings.address))?;\n\n println!(\n \"Server running at http://{}/cocoindex\",\n listener.local_addr()?\n );\n let serve_fut = async { axum::serve(listener, app).await.unwrap() };\n Ok(serve_fut.boxed())\n}\n"], ["/cocoindex/src/execution/stats.rs", "use crate::prelude::*;\n\nuse std::{\n ops::AddAssign,\n sync::atomic::{AtomicI64, Ordering::Relaxed},\n};\n\n#[derive(Default, Serialize)]\npub 
struct Counter(pub AtomicI64);\n\nimpl Counter {\n pub fn inc(&self, by: i64) {\n self.0.fetch_add(by, Relaxed);\n }\n\n pub fn get(&self) -> i64 {\n self.0.load(Relaxed)\n }\n\n pub fn delta(&self, base: &Self) -> Counter {\n Counter(AtomicI64::new(self.get() - base.get()))\n }\n\n pub fn into_inner(self) -> i64 {\n self.0.into_inner()\n }\n\n pub fn merge(&self, delta: &Self) {\n self.0.fetch_add(delta.get(), Relaxed);\n }\n}\n\nimpl AddAssign for Counter {\n fn add_assign(&mut self, rhs: Self) {\n self.0.fetch_add(rhs.into_inner(), Relaxed);\n }\n}\n\nimpl Clone for Counter {\n fn clone(&self) -> Self {\n Self(AtomicI64::new(self.get()))\n }\n}\n\nimpl std::fmt::Display for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\nimpl std::fmt::Debug for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\n#[derive(Debug, Serialize, Default, Clone)]\npub struct UpdateStats {\n pub num_no_change: Counter,\n pub num_insertions: Counter,\n pub num_deletions: Counter,\n /// Number of source rows that were updated.\n pub num_updates: Counter,\n /// Number of source rows that were reprocessed because of logic change.\n pub num_reprocesses: Counter,\n pub num_errors: Counter,\n}\n\nimpl UpdateStats {\n pub fn delta(&self, base: &Self) -> Self {\n UpdateStats {\n num_no_change: self.num_no_change.delta(&base.num_no_change),\n num_insertions: self.num_insertions.delta(&base.num_insertions),\n num_deletions: self.num_deletions.delta(&base.num_deletions),\n num_updates: self.num_updates.delta(&base.num_updates),\n num_reprocesses: self.num_reprocesses.delta(&base.num_reprocesses),\n num_errors: self.num_errors.delta(&base.num_errors),\n }\n }\n\n pub fn merge(&self, delta: &Self) {\n self.num_no_change.merge(&delta.num_no_change);\n self.num_insertions.merge(&delta.num_insertions);\n self.num_deletions.merge(&delta.num_deletions);\n 
self.num_updates.merge(&delta.num_updates);\n self.num_reprocesses.merge(&delta.num_reprocesses);\n self.num_errors.merge(&delta.num_errors);\n }\n\n pub fn has_any_change(&self) -> bool {\n self.num_insertions.get() > 0\n || self.num_deletions.get() > 0\n || self.num_updates.get() > 0\n || self.num_reprocesses.get() > 0\n || self.num_errors.get() > 0\n }\n}\n\nimpl std::fmt::Display for UpdateStats {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let mut messages = Vec::new();\n let num_errors = self.num_errors.get();\n if num_errors > 0 {\n messages.push(format!(\"{num_errors} source rows FAILED\"));\n }\n\n let num_skipped = self.num_no_change.get();\n if num_skipped > 0 {\n messages.push(format!(\"{num_skipped} source rows NO CHANGE\"));\n }\n\n let num_insertions = self.num_insertions.get();\n let num_deletions = self.num_deletions.get();\n let num_updates = self.num_updates.get();\n let num_reprocesses = self.num_reprocesses.get();\n let num_source_rows = num_insertions + num_deletions + num_updates + num_reprocesses;\n if num_source_rows > 0 {\n messages.push(format!(\n \"{num_source_rows} source rows processed ({num_insertions} ADDED, {num_deletions} REMOVED, {num_updates} UPDATED, {num_reprocesses} REPROCESSED on flow change)\",\n ));\n }\n\n if !messages.is_empty() {\n write!(f, \"{}\", messages.join(\"; \"))?;\n } else {\n write!(f, \"No changes\")?;\n }\n\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceUpdateInfo {\n pub source_name: String,\n pub stats: UpdateStats,\n}\n\nimpl std::fmt::Display for SourceUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.source_name, self.stats)\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct IndexUpdateInfo {\n pub sources: Vec,\n}\n\nimpl std::fmt::Display for IndexUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n for source in self.sources.iter() {\n writeln!(f, 
\"{source}\")?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/base/duration.rs", "use std::f64;\n\nuse anyhow::{Result, anyhow, bail};\nuse chrono::Duration;\n\n/// Parses a string of number-unit pairs into a vector of (number, unit),\n/// ensuring units are among the allowed ones.\nfn parse_components(\n s: &str,\n allowed_units: &[char],\n original_input: &str,\n) -> Result> {\n let mut result = Vec::new();\n let mut iter = s.chars().peekable();\n while iter.peek().is_some() {\n let mut num_str = String::new();\n let mut has_decimal = false;\n\n // Parse digits and optional decimal point\n while let Some(&c) = iter.peek() {\n if c.is_ascii_digit() || (c == '.' && !has_decimal) {\n if c == '.' {\n has_decimal = true;\n }\n num_str.push(iter.next().unwrap());\n } else {\n break;\n }\n }\n if num_str.is_empty() {\n bail!(\"Expected number in: {}\", original_input);\n }\n let num = num_str\n .parse::()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", num_str, original_input))?;\n if let Some(&unit) = iter.peek() {\n if allowed_units.contains(&unit) {\n result.push((num, unit));\n iter.next();\n } else {\n bail!(\"Invalid unit '{}' in: {}\", unit, original_input);\n }\n } else {\n bail!(\n \"Missing unit after number '{}' in: {}\",\n num_str,\n original_input\n );\n }\n }\n Ok(result)\n}\n\n/// Parses an ISO 8601 duration string into a `chrono::Duration`.\nfn parse_iso8601_duration(s: &str, original_input: &str) -> Result {\n let (is_negative, s_after_sign) = if let Some(stripped) = s.strip_prefix('-') {\n (true, stripped)\n } else {\n (false, s)\n };\n\n if !s_after_sign.starts_with('P') {\n bail!(\"Duration must start with 'P' in: {}\", original_input);\n }\n let s_after_p = &s_after_sign[1..];\n\n let (date_part, time_part) = if let Some(pos) = s_after_p.find('T') {\n (&s_after_p[..pos], Some(&s_after_p[pos + 1..]))\n } else {\n (s_after_p, None)\n };\n\n // Date components (Y, M, W, D)\n let date_components = parse_components(date_part, &['Y', 'M', 'W', 'D'], 
original_input)?;\n\n // Time components (H, M, S)\n let time_components = if let Some(time_str) = time_part {\n let comps = parse_components(time_str, &['H', 'M', 'S'], original_input)?;\n if comps.is_empty() {\n bail!(\n \"Time part present but no time components in: {}\",\n original_input\n );\n }\n comps\n } else {\n vec![]\n };\n\n if date_components.is_empty() && time_components.is_empty() {\n bail!(\"No components in duration: {}\", original_input);\n }\n\n // Accumulate date duration\n let date_duration = date_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| {\n let days = match unit {\n 'Y' => num * 365.0,\n 'M' => num * 30.0,\n 'W' => num * 7.0,\n 'D' => num,\n _ => unreachable!(\"Invalid date unit should be caught by prior validation\"),\n };\n let microseconds = (days * 86_400_000_000.0) as i64;\n acc + Duration::microseconds(microseconds)\n });\n\n // Accumulate time duration\n let time_duration =\n time_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| match unit {\n 'H' => {\n let nanoseconds = (num * 3_600_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'M' => {\n let nanoseconds = (num * 60_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'S' => {\n let nanoseconds = (num.fract() * 1_000_000_000.0).round() as i64;\n acc + Duration::seconds(num as i64) + Duration::nanoseconds(nanoseconds)\n }\n _ => unreachable!(\"Invalid time unit should be caught by prior validation\"),\n });\n\n let mut total = date_duration + time_duration;\n if is_negative {\n total = -total;\n }\n\n Ok(total)\n}\n\n/// Parses a human-readable duration string into a `chrono::Duration`.\nfn parse_human_readable_duration(s: &str, original_input: &str) -> Result {\n let parts: Vec<&str> = s.split_whitespace().collect();\n if parts.is_empty() || parts.len() % 2 != 0 {\n bail!(\n \"Invalid human-readable duration format in: {}\",\n original_input\n );\n }\n\n let durations: Result> = 
parts\n .chunks(2)\n .map(|chunk| {\n let num: i64 = chunk[0]\n .parse()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", chunk[0], original_input))?;\n\n match chunk[1].to_lowercase().as_str() {\n \"day\" | \"days\" => Ok(Duration::days(num)),\n \"hour\" | \"hours\" => Ok(Duration::hours(num)),\n \"minute\" | \"minutes\" => Ok(Duration::minutes(num)),\n \"second\" | \"seconds\" => Ok(Duration::seconds(num)),\n \"millisecond\" | \"milliseconds\" => Ok(Duration::milliseconds(num)),\n \"microsecond\" | \"microseconds\" => Ok(Duration::microseconds(num)),\n _ => bail!(\"Invalid unit '{}' in: {}\", chunk[1], original_input),\n }\n })\n .collect();\n\n durations.map(|durs| durs.into_iter().sum())\n}\n\n/// Parses a duration string into a `chrono::Duration`, trying ISO 8601 first, then human-readable format.\npub fn parse_duration(s: &str) -> Result {\n let original_input = s;\n let s = s.trim();\n if s.is_empty() {\n bail!(\"Empty duration string\");\n }\n\n let is_likely_iso8601 = match s.as_bytes() {\n [c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n [b'-', c, ..] 
if c.eq_ignore_ascii_case(&b'P') => true,\n _ => false,\n };\n\n if is_likely_iso8601 {\n parse_iso8601_duration(s, original_input)\n } else {\n parse_human_readable_duration(s, original_input)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn check_ok(res: Result, expected: Duration, input_str: &str) {\n match res {\n Ok(duration) => assert_eq!(duration, expected, \"Input: '{input_str}'\"),\n Err(e) => panic!(\"Input: '{input_str}', expected Ok({expected:?}), but got Err: {e}\"),\n }\n }\n\n fn check_err_contains(res: Result, expected_substring: &str, input_str: &str) {\n match res {\n Ok(d) => panic!(\n \"Input: '{input_str}', expected error containing '{expected_substring}', but got Ok({d:?})\"\n ),\n Err(e) => {\n let err_msg = e.to_string();\n assert!(\n err_msg.contains(expected_substring),\n \"Input: '{input_str}', error message '{err_msg}' does not contain expected substring '{expected_substring}'\"\n );\n }\n }\n }\n\n #[test]\n fn test_empty_string() {\n check_err_contains(parse_duration(\"\"), \"Empty duration string\", \"\\\"\\\"\");\n }\n\n #[test]\n fn test_whitespace_string() {\n check_err_contains(parse_duration(\" \"), \"Empty duration string\", \"\\\" \\\"\");\n }\n\n #[test]\n fn test_iso_just_p() {\n check_err_contains(parse_duration(\"P\"), \"No components in duration: P\", \"\\\"P\\\"\");\n }\n\n #[test]\n fn test_iso_pt() {\n check_err_contains(\n parse_duration(\"PT\"),\n \"Time part present but no time components in: PT\",\n \"\\\"PT\\\"\",\n );\n }\n\n #[test]\n fn test_iso_missing_number_before_unit_in_date_part() {\n check_err_contains(parse_duration(\"PD\"), \"Expected number in: PD\", \"\\\"PD\\\"\");\n }\n #[test]\n fn test_iso_missing_number_before_unit_in_time_part() {\n check_err_contains(parse_duration(\"PTM\"), \"Expected number in: PTM\", \"\\\"PTM\\\"\");\n }\n\n #[test]\n fn test_iso_time_unit_without_t() {\n check_err_contains(parse_duration(\"P1H\"), \"Invalid unit 'H' in: P1H\", \"\\\"P1H\\\"\");\n 
check_err_contains(parse_duration(\"P1S\"), \"Invalid unit 'S' in: P1S\", \"\\\"P1S\\\"\");\n }\n\n #[test]\n fn test_iso_invalid_unit() {\n check_err_contains(parse_duration(\"P1X\"), \"Invalid unit 'X' in: P1X\", \"\\\"P1X\\\"\");\n check_err_contains(\n parse_duration(\"PT1X\"),\n \"Invalid unit 'X' in: PT1X\",\n \"\\\"PT1X\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_lowercase_unit_is_not_allowed() {\n check_err_contains(\n parse_duration(\"p1h\"),\n \"Duration must start with 'P' in: p1h\",\n \"\\\"p1h\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1h\"),\n \"Invalid unit 'h' in: PT1h\",\n \"\\\"PT1h\\\"\",\n );\n }\n\n #[test]\n fn test_iso_trailing_number_error() {\n check_err_contains(\n parse_duration(\"P1D2\"),\n \"Missing unit after number '2' in: P1D2\",\n \"\\\"P1D2\\\"\",\n );\n }\n\n #[test]\n fn test_iso_invalid_fractional_format() {\n check_err_contains(\n parse_duration(\"PT1..5S\"),\n \"Invalid unit '.' in: PT1..5S\",\n \"\\\"PT1..5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1.5.5S\"),\n \"Invalid unit '.' in: PT1.5.5S\",\n \"\\\"PT1.5.5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1..5D\"),\n \"Invalid unit '.' 
in: P1..5D\",\n \"\\\"P1..5D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_misplaced_t() {\n check_err_contains(\n parse_duration(\"P1DT2H T3M\"),\n \"Expected number in: P1DT2H T3M\",\n \"\\\"P1DT2H T3M\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1T2H\"),\n \"Missing unit after number '1' in: P1T2H\",\n \"\\\"P1T2H\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_number_after_p() {\n check_err_contains(\n parse_duration(\"P-1D\"),\n \"Expected number in: P-1D\",\n \"\\\"P-1D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_months() {\n check_ok(parse_duration(\"P1M\"), Duration::days(30), \"\\\"P1M\\\"\");\n check_ok(parse_duration(\" P13M\"), Duration::days(13 * 30), \"\\\"P13M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_weeks() {\n check_ok(parse_duration(\"P1W\"), Duration::days(7), \"\\\"P1W\\\"\");\n check_ok(parse_duration(\" P1W \"), Duration::days(7), \"\\\"P1W\\\"\");\n }\n\n #[test]\n fn test_iso_valid_days() {\n check_ok(parse_duration(\"P1D\"), Duration::days(1), \"\\\"P1D\\\"\");\n }\n\n #[test]\n fn test_iso_valid_hours() {\n check_ok(parse_duration(\"PT2H\"), Duration::hours(2), \"\\\"PT2H\\\"\");\n }\n\n #[test]\n fn test_iso_valid_minutes() {\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT3M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_seconds() {\n check_ok(parse_duration(\"PT4S\"), Duration::seconds(4), \"\\\"PT4S\\\"\");\n }\n\n #[test]\n fn test_iso_combined_units() {\n check_ok(\n parse_duration(\"P1Y2M3W4DT5H6M7S\"),\n Duration::days(365 + 60 + 3 * 7 + 4)\n + Duration::hours(5)\n + Duration::minutes(6)\n + Duration::seconds(7),\n \"\\\"P1Y2M3DT4H5M6S\\\"\",\n );\n check_ok(\n parse_duration(\"P1DT2H3M4S\"),\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4),\n \"\\\"P1DT2H3M4S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_duplicated_unit() {\n check_ok(parse_duration(\"P1D1D\"), Duration::days(2), \"\\\"P1D1D\\\"\");\n check_ok(parse_duration(\"PT1H1H\"), Duration::hours(2), 
\"\\\"PT1H1H\\\"\");\n }\n\n #[test]\n fn test_iso_out_of_order_unit() {\n check_ok(\n parse_duration(\"P1W1Y\"),\n Duration::days(365 + 7),\n \"\\\"P1W1Y\\\"\",\n );\n check_ok(\n parse_duration(\"PT2S1H\"),\n Duration::hours(1) + Duration::seconds(2),\n \"\\\"PT2S1H\\\"\",\n );\n check_ok(parse_duration(\"P3M\"), Duration::days(90), \"\\\"PT2S1H\\\"\");\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT2S1H\\\"\");\n check_err_contains(\n parse_duration(\"P1H2D\"),\n \"Invalid unit 'H' in: P1H2D\", // Time part without 'T' is invalid\n \"\\\"P1H2D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_duration_p1d() {\n check_ok(parse_duration(\"-P1D\"), -Duration::days(1), \"\\\"-P1D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pd0() {\n check_ok(parse_duration(\"P0D\"), Duration::zero(), \"\\\"P0D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0s() {\n check_ok(parse_duration(\"PT0S\"), Duration::zero(), \"\\\"PT0S\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0h0m0s() {\n check_ok(parse_duration(\"PT0H0M0S\"), Duration::zero(), \"\\\"PT0H0M0S\\\"\");\n }\n\n #[test]\n fn test_iso_fractional_seconds() {\n check_ok(\n parse_duration(\"PT1.5S\"),\n Duration::seconds(1) + Duration::milliseconds(500),\n \"\\\"PT1.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT441010.456123S\"),\n Duration::seconds(441010) + Duration::microseconds(456123),\n \"\\\"PT441010.456123S\\\"\",\n );\n check_ok(\n parse_duration(\"PT0.000001S\"),\n Duration::microseconds(1),\n \"\\\"PT0.000001S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_fractional_date_units() {\n check_ok(\n parse_duration(\"P1.5D\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"P1.25Y\"),\n Duration::microseconds((1.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"P1.25Y\\\"\",\n );\n check_ok(\n parse_duration(\"P2.75M\"),\n Duration::microseconds((2.75 * 30.0 * 86_400_000_000.0) as i64),\n \"\\\"P2.75M\\\"\",\n );\n 
check_ok(\n parse_duration(\"P0.5W\"),\n Duration::microseconds((0.5 * 7.0 * 86_400_000_000.0) as i64),\n \"\\\"P0.5W\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_fractional_date_units() {\n check_ok(\n parse_duration(\"-P1.5D\"),\n -Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"-P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"-P0.25Y\"),\n -Duration::microseconds((0.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"-P0.25Y\\\"\",\n );\n }\n\n #[test]\n fn test_iso_combined_fractional_units() {\n check_ok(\n parse_duration(\"P1.5DT2.5H3.5M4.5S\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64)\n + Duration::microseconds((2.5 * 3_600_000_000.0) as i64)\n + Duration::microseconds((3.5 * 60_000_000.0) as i64)\n + Duration::seconds(4)\n + Duration::milliseconds(500),\n \"\\\"1.5DT2.5H3.5M4.5S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_multiple_fractional_time_units() {\n check_ok(\n parse_duration(\"PT1.5S2.5S\"),\n Duration::seconds(1 + 2) + Duration::milliseconds(500) + Duration::milliseconds(500),\n \"\\\"PT1.5S2.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT1.1H2.2M3.3S\"),\n Duration::hours(1)\n + Duration::seconds((0.1 * 3600.0) as i64)\n + Duration::minutes(2)\n + Duration::seconds((0.2 * 60.0) as i64)\n + Duration::seconds(3)\n + Duration::milliseconds(300),\n \"\\\"PT1.1H2.2M3.3S\\\"\",\n );\n }\n\n // Human-readable Tests\n #[test]\n fn test_human_missing_unit() {\n check_err_contains(\n parse_duration(\"1\"),\n \"Invalid human-readable duration format in: 1\",\n \"\\\"1\\\"\",\n );\n }\n\n #[test]\n fn test_human_missing_number() {\n check_err_contains(\n parse_duration(\"day\"),\n \"Invalid human-readable duration format in: day\",\n \"\\\"day\\\"\",\n );\n }\n\n #[test]\n fn test_human_incomplete_pair() {\n check_err_contains(\n parse_duration(\"1 day 2\"),\n \"Invalid human-readable duration format in: 1 day 2\",\n \"\\\"1 day 2\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_number_at_start() {\n 
check_err_contains(\n parse_duration(\"one day\"),\n \"Invalid number 'one' in: one day\",\n \"\\\"one day\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_unit() {\n check_err_contains(\n parse_duration(\"1 hour 2 minutes 3 seconds four seconds\"),\n \"Invalid number 'four' in: 1 hour 2 minutes 3 seconds four seconds\",\n \"\\\"1 hour 2 minutes 3 seconds four seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_float_number_fail() {\n check_err_contains(\n parse_duration(\"1.5 hours\"),\n \"Invalid number '1.5' in: 1.5 hours\",\n \"\\\"1.5 hours\\\"\",\n );\n }\n\n #[test]\n fn test_invalid_human_readable_no_pairs() {\n check_err_contains(\n parse_duration(\"just some words\"),\n \"Invalid human-readable duration format in: just some words\",\n \"\\\"just some words\\\"\",\n );\n }\n\n #[test]\n fn test_human_unknown_unit() {\n check_err_contains(\n parse_duration(\"1 year\"),\n \"Invalid unit 'year' in: 1 year\",\n \"\\\"1 year\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_day() {\n check_ok(parse_duration(\"1 day\"), Duration::days(1), \"\\\"1 day\\\"\");\n }\n\n #[test]\n fn test_human_valid_days_uppercase() {\n check_ok(parse_duration(\"2 DAYS\"), Duration::days(2), \"\\\"2 DAYS\\\"\");\n }\n\n #[test]\n fn test_human_valid_hour() {\n check_ok(parse_duration(\"3 hour\"), Duration::hours(3), \"\\\"3 hour\\\"\");\n }\n\n #[test]\n fn test_human_valid_hours_mixedcase() {\n check_ok(parse_duration(\"4 HoUrS\"), Duration::hours(4), \"\\\"4 HoUrS\\\"\");\n }\n\n #[test]\n fn test_human_valid_minute() {\n check_ok(\n parse_duration(\"5 minute\"),\n Duration::minutes(5),\n \"\\\"5 minute\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_minutes() {\n check_ok(\n parse_duration(\"6 minutes\"),\n Duration::minutes(6),\n \"\\\"6 minutes\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_second() {\n check_ok(\n parse_duration(\"7 second\"),\n Duration::seconds(7),\n \"\\\"7 second\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_seconds() {\n check_ok(\n 
parse_duration(\"8 seconds\"),\n Duration::seconds(8),\n \"\\\"8 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_millisecond() {\n check_ok(\n parse_duration(\"9 millisecond\"),\n Duration::milliseconds(9),\n \"\\\"9 millisecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_milliseconds() {\n check_ok(\n parse_duration(\"10 milliseconds\"),\n Duration::milliseconds(10),\n \"\\\"10 milliseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microsecond() {\n check_ok(\n parse_duration(\"11 microsecond\"),\n Duration::microseconds(11),\n \"\\\"11 microsecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microseconds() {\n check_ok(\n parse_duration(\"12 microseconds\"),\n Duration::microseconds(12),\n \"\\\"12 microseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_combined() {\n let expected =\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4);\n check_ok(\n parse_duration(\"1 day 2 hours 3 minutes 4 seconds\"),\n expected,\n \"\\\"1 day 2 hours 3 minutes 4 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_out_of_order() {\n check_ok(\n parse_duration(\"1 second 2 hours\"),\n Duration::hours(2) + Duration::seconds(1),\n \"\\\"1 second 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"7 minutes 6 hours 5 days\"),\n Duration::days(5) + Duration::hours(6) + Duration::minutes(7),\n \"\\\"7 minutes 6 hours 5 days\\\"\",\n )\n }\n\n #[test]\n fn test_human_zero_duration_seconds() {\n check_ok(\n parse_duration(\"0 seconds\"),\n Duration::zero(),\n \"\\\"0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_days_hours() {\n check_ok(\n parse_duration(\"0 day 0 hour\"),\n Duration::zero(),\n \"\\\"0 day 0 hour\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_multiple_zeros() {\n check_ok(\n parse_duration(\"0 days 0 hours 0 minutes 0 seconds\"),\n Duration::zero(),\n \"\\\"0 days 0 hours 0 minutes 0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_no_space_between_num_unit() {\n 
check_err_contains(\n parse_duration(\"1day\"),\n \"Invalid human-readable duration format in: 1day\",\n \"\\\"1day\\\"\",\n );\n }\n\n #[test]\n fn test_human_trimmed() {\n check_ok(parse_duration(\" 1 day \"), Duration::days(1), \"\\\" 1 day \\\"\");\n }\n\n #[test]\n fn test_human_extra_whitespace() {\n check_ok(\n parse_duration(\" 1 day 2 hours \"),\n Duration::days(1) + Duration::hours(2),\n \"\\\" 1 day 2 hours \\\"\",\n );\n }\n\n #[test]\n fn test_human_negative_numbers() {\n check_ok(\n parse_duration(\"-1 day 2 hours\"),\n Duration::days(-1) + Duration::hours(2),\n \"\\\"-1 day 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"1 day -2 hours\"),\n Duration::days(1) + Duration::hours(-2),\n \"\\\"1 day -2 hours\\\"\",\n );\n }\n}\n"], ["/cocoindex/src/setup/auth_registry.rs", "use std::collections::hash_map;\n\nuse crate::prelude::*;\n\npub struct AuthRegistry {\n entries: RwLock>,\n}\n\nimpl Default for AuthRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl AuthRegistry {\n pub fn new() -> Self {\n Self {\n entries: RwLock::new(HashMap::new()),\n }\n }\n\n pub fn add(&self, key: String, value: serde_json::Value) -> Result<()> {\n let mut entries = self.entries.write().unwrap();\n match entries.entry(key) {\n hash_map::Entry::Occupied(entry) => {\n api_bail!(\"Auth entry already exists: {}\", entry.key());\n }\n hash_map::Entry::Vacant(entry) => {\n entry.insert(value);\n }\n }\n Ok(())\n }\n\n pub fn get(&self, entry_ref: &spec::AuthEntryReference) -> Result {\n let entries = self.entries.read().unwrap();\n match entries.get(&entry_ref.key) {\n Some(value) => Ok(serde_json::from_value(value.clone())?),\n None => api_bail!(\n \"Auth entry `{key}` not found.\\n\\\n Hint: If you're not referencing `{key}` in your flow, it will likely be caused by a previously persisted target using it. \\\n You need to bring back the definition for the auth entry `{key}`, so that CocoIndex will be able to do a cleanup in the next `setup` run. 
\\\n See https://cocoindex.io/docs/core/flow_def#auth-registry for more details.\",\n key = entry_ref.key\n ),\n }\n }\n}\n"], ["/cocoindex/src/execution/indexing_status.rs", "use crate::prelude::*;\n\nuse super::db_tracking;\nuse super::evaluator;\nuse futures::try_join;\n\n#[derive(Debug, Serialize)]\npub struct SourceRowLastProcessedInfo {\n pub source_ordinal: interface::Ordinal,\n pub processing_time: Option>,\n pub is_logic_current: bool,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowInfo {\n pub ordinal: interface::Ordinal,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowIndexingStatus {\n pub last_processed: Option,\n pub current: Option,\n}\n\npub async fn get_source_row_indexing_status(\n src_eval_ctx: &evaluator::SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n pool: &sqlx::PgPool,\n) -> Result {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let last_processed_fut = db_tracking::read_source_last_processed_info(\n setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n let current_fut = src_eval_ctx.import_op.executor.get_value(\n src_eval_ctx.key,\n &interface::SourceExecutorGetOptions {\n include_value: false,\n include_ordinal: true,\n },\n );\n let (last_processed, current) = try_join!(last_processed_fut, current_fut)?;\n\n let last_processed = last_processed.map(|l| SourceRowLastProcessedInfo {\n source_ordinal: interface::Ordinal(l.processed_source_ordinal),\n processing_time: l\n .process_time_micros\n .and_then(chrono::DateTime::::from_timestamp_micros),\n is_logic_current: Some(src_eval_ctx.plan.logic_fingerprint.0.as_slice())\n == l.process_logic_fingerprint.as_deref(),\n });\n let current = SourceRowInfo {\n ordinal: current\n .ordinal\n .ok_or(anyhow::anyhow!(\"Ordinal is unavailable for the source\"))?,\n };\n Ok(SourceRowIndexingStatus {\n last_processed,\n 
current: Some(current),\n })\n}\n"], ["/cocoindex/src/settings.rs", "use serde::Deserialize;\n\n#[derive(Deserialize, Debug)]\npub struct DatabaseConnectionSpec {\n pub url: String,\n pub user: Option,\n pub password: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct GlobalExecutionOptions {\n pub source_max_inflight_rows: Option,\n pub source_max_inflight_bytes: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct Settings {\n #[serde(default)]\n pub database: Option,\n #[serde(default)]\n #[allow(dead_code)] // Used via serialization/deserialization to Python\n pub app_namespace: String,\n #[serde(default)]\n pub global_execution_options: GlobalExecutionOptions,\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_settings_deserialize_with_database() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n },\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, Some(\"testuser\".to_string()));\n assert_eq!(db.password, Some(\"testpass\".to_string()));\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_without_database() {\n let json = r#\"{\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_empty_object() {\n let json = r#\"{}\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_settings_deserialize_database_without_user_password() {\n let json = 
r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\"\n }\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, None);\n assert_eq!(db.password, None);\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_database_connection_spec_deserialize() {\n let json = r#\"{\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n }\"#;\n\n let db_spec: DatabaseConnectionSpec = serde_json::from_str(json).unwrap();\n\n assert_eq!(db_spec.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db_spec.user, Some(\"testuser\".to_string()));\n assert_eq!(db_spec.password, Some(\"testpass\".to_string()));\n }\n}\n"], ["/cocoindex/src/prelude.rs", "#![allow(unused_imports)]\n\npub(crate) use anyhow::{Context, Result};\npub(crate) use async_trait::async_trait;\npub(crate) use chrono::{DateTime, Utc};\npub(crate) use futures::{FutureExt, StreamExt};\npub(crate) use futures::{\n future::{BoxFuture, Shared},\n prelude::*,\n stream::BoxStream,\n};\npub(crate) use indexmap::{IndexMap, IndexSet};\npub(crate) use itertools::Itertools;\npub(crate) use serde::{Deserialize, Serialize, de::DeserializeOwned};\npub(crate) use std::any::Any;\npub(crate) use std::borrow::Cow;\npub(crate) use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};\npub(crate) use std::hash::Hash;\npub(crate) use std::sync::{Arc, LazyLock, Mutex, OnceLock, RwLock, Weak};\n\npub(crate) use crate::base::{self, schema, spec, value};\npub(crate) use crate::builder::{self, exec_ctx, plan};\npub(crate) use crate::execution;\npub(crate) use crate::lib_context::{FlowContext, LibContext, get_lib_context, get_runtime};\npub(crate) use crate::ops::interface;\npub(crate) use crate::service::error::{ApiError, invariance_violation};\npub(crate) use 
crate::setup;\npub(crate) use crate::setup::AuthRegistry;\npub(crate) use crate::utils::{self, concur_control, retryable};\npub(crate) use crate::{api_bail, api_error};\n\npub(crate) use anyhow::{anyhow, bail};\npub(crate) use async_stream::{stream, try_stream};\npub(crate) use log::{debug, error, info, trace, warn};\n\npub(crate) use derivative::Derivative;\n"], ["/cocoindex/src/utils/db.rs", "#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidIdentifier(pub String);\n\nimpl TryFrom for ValidIdentifier {\n type Error = anyhow::Error;\n\n fn try_from(s: String) -> Result {\n if !s.is_empty() && s.chars().all(|c| c.is_alphanumeric() || c == '_') {\n Ok(ValidIdentifier(s))\n } else {\n Err(anyhow::anyhow!(\"Invalid identifier: {s:?}\"))\n }\n }\n}\n\nimpl std::fmt::Display for ValidIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.0, f)\n }\n}\n\nimpl std::ops::Deref for ValidIdentifier {\n type Target = String;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\npub enum WriteAction {\n Insert,\n Update,\n}\n\npub fn sanitize_identifier(s: &str) -> String {\n let mut result = String::new();\n for c in s.chars() {\n if c.is_alphanumeric() || c == '_' {\n result.push(c);\n } else {\n result.push_str(\"__\");\n }\n }\n result\n}\n"], ["/cocoindex/src/ops/registry.rs", "use super::interface::ExecutorFactory;\nuse anyhow::Result;\nuse std::collections::HashMap;\n\npub struct ExecutorFactoryRegistry {\n factories: HashMap,\n}\n\nimpl Default for ExecutorFactoryRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl ExecutorFactoryRegistry {\n pub fn new() -> Self {\n Self {\n factories: HashMap::new(),\n }\n }\n\n pub fn register(&mut self, name: String, factory: ExecutorFactory) -> Result<()> {\n match self.factories.entry(name) {\n std::collections::hash_map::Entry::Occupied(entry) => Err(anyhow::anyhow!(\n \"Factory with name already exists: {}\",\n entry.key()\n )),\n 
std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(factory);\n Ok(())\n }\n }\n }\n\n pub fn get(&self, name: &str) -> Option<&ExecutorFactory> {\n self.factories.get(name)\n }\n}\n"], ["/cocoindex/src/base/field_attrs.rs", "use const_format::concatcp;\n\npub static COCOINDEX_PREFIX: &str = \"cocoindex.io/\";\n\n/// Present for bytes and str. It points to fields that represents the original file name for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_FILENAME: &str = concatcp!(COCOINDEX_PREFIX, \"content_filename\");\n\n/// Present for bytes and str. It points to fields that represents mime types for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_MIME_TYPE: &str = concatcp!(COCOINDEX_PREFIX, \"content_mime_type\");\n\n/// Present for chunks. It points to fields that the chunks are for.\n/// Type: AnalyzedValueMapping\npub static CHUNK_BASE_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"chunk_base_text\");\n\n/// Base text for an embedding vector.\npub static _EMBEDDING_ORIGIN_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"embedding_origin_text\");\n"], ["/cocoindex/src/ops/functions/mod.rs", "pub mod embed_text;\npub mod extract_by_llm;\npub mod parse_json;\npub mod split_recursively;\n\n#[cfg(test)]\nmod test_utils;\n"], ["/cocoindex/src/lib.rs", "mod base;\nmod builder;\nmod execution;\nmod lib_context;\nmod llm;\nmod ops;\nmod prelude;\nmod py;\nmod server;\nmod service;\nmod settings;\nmod setup;\nmod utils;\n"], ["/cocoindex/src/setup/mod.rs", "mod auth_registry;\nmod db_metadata;\nmod driver;\nmod states;\n\npub mod components;\n\npub use auth_registry::AuthRegistry;\npub use driver::*;\npub use states::*;\n"], ["/cocoindex/src/builder/mod.rs", "pub mod analyzer;\npub mod exec_ctx;\npub mod flow_builder;\npub mod plan;\n\nmod analyzed_flow;\n\npub use analyzed_flow::AnalyzedFlow;\npub use analyzed_flow::AnalyzedTransientFlow;\n"], ["/cocoindex/src/ops/mod.rs", "pub mod interface;\npub mod registry;\n\n// All 
operations\nmod factory_bases;\nmod functions;\nmod sources;\nmod targets;\n\nmod registration;\npub(crate) use registration::*;\npub(crate) mod py_factory;\n\n// SDK is used for help registration for operations.\nmod sdk;\n"], ["/cocoindex/src/execution/mod.rs", "pub(crate) mod db_tracking_setup;\npub(crate) mod dumper;\npub(crate) mod evaluator;\npub(crate) mod indexing_status;\npub(crate) mod memoization;\npub(crate) mod row_indexer;\npub(crate) mod source_indexer;\npub(crate) mod stats;\n\nmod live_updater;\npub(crate) use live_updater::*;\n\nmod db_tracking;\n"], ["/cocoindex/src/ops/targets/mod.rs", "mod shared;\n\npub mod kuzu;\npub mod neo4j;\npub mod postgres;\npub mod qdrant;\n"], ["/cocoindex/src/base/mod.rs", "pub mod duration;\npub mod field_attrs;\npub mod json_schema;\npub mod schema;\npub mod spec;\npub mod value;\n"], ["/cocoindex/src/service/mod.rs", "pub(crate) mod error;\npub(crate) mod flows;\n"], ["/cocoindex/src/ops/sources/mod.rs", "pub mod amazon_s3;\npub mod azure_blob;\npub mod google_drive;\npub mod local_file;\n"], ["/cocoindex/src/utils/mod.rs", "pub mod concur_control;\npub mod db;\npub mod fingerprint;\npub mod immutable;\npub mod retryable;\npub mod yaml_ser;\n"], ["/cocoindex/src/ops/targets/shared/mod.rs", "pub mod property_graph;\npub mod table_columns;\n"]], "task_instance_info": {"created_time": "2025-08-21 01:30:28", "created_task_model": "DeepSeek-R1", "function_skeleton": "pub async fn new_llm_embedding_client (\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {}", "code_description": "This code segment functions to: Instantiate appropriate service clients for text embedding processing based on specified API configurations while handling unsupported service types through error reporting."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\npub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) 
-> Result> {\n let client = match api_type {\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address).await?) as Box\n }\n _ => api_bail!(\"Embedding not supported for {api_type:?}\"),\n };\n Ok(client)\n}\n[TASK_END]\n```", "inference_time": "2025-08-21 01-30-58"}, "editdistance_info": {"edit_distance": 35.906, "calculate_time": "2025-08-21 01:30:58", "true_code_clean": "pub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}", "predict_code_clean": "pub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address).await?) 
as Box\n }\n _ => api_bail!(\"Embedding not supported for {api_type:?}\"),\n };\n Ok(client)\n}"}} {"repo_name": "cocoindex", "file_name": "/cocoindex/src/ops/sources/amazon_s3.rs", "inference_info": {"prefix_code": "use crate::fields_value;\nuse async_stream::try_stream;\nuse aws_config::BehaviorVersion;\nuse aws_sdk_s3::Client;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\nuse urlencoding;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n/// Decode a form-encoded URL string, treating '+' as spaces\nfn decode_form_encoded_url(input: &str) -> Result> {\n // Replace '+' with spaces (form encoding convention), then decode\n // This handles both cases correctly:\n // - Literal '+' would be encoded as '%2B' and remain unchanged after replacement\n // - Space would be encoded as '+' and become ' ' after replacement\n let with_spaces = input.replace(\"+\", \" \");\n Ok(urlencoding::decode(&with_spaces)?.into())\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n sqs_queue_url: Option,\n}\n\nstruct SqsContext {\n client: aws_sdk_sqs::Client,\n queue_url: String,\n}\n\nimpl SqsContext {\n async fn delete_message(&self, receipt_handle: String) -> Result<()> {\n self.client\n .delete_message()\n .queue_url(&self.queue_url)\n .receipt_handle(receipt_handle)\n .send()\n .await?;\n Ok(())\n }\n}\n\nstruct Executor {\n client: Client,\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n sqs_context: Option>,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: 
&aws_sdk_s3::primitives::DateTime) -> Ordinal {\n Ordinal(Some((dt.as_nanos() / 1000) as i64))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token = None;\n loop {\n let mut req = self.client\n .list_objects_v2()\n .bucket(&self.bucket_name);\n if let Some(ref p) = self.prefix {\n req = req.prefix(p);\n }\n if let Some(ref token) = continuation_token {\n req = req.continuation_token(token);\n }\n let resp = req.send().await?;\n if let Some(contents) = &resp.contents {\n let mut batch = Vec::new();\n for obj in contents {\n if let Some(key) = obj.key() {\n // Only include files (not folders)\n if key.ends_with('/') { continue; }\n let include = self.included_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(true);\n let exclude = self.excluded_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(false);\n if include && !exclude {\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.to_string().into()),\n ordinal: obj.last_modified().map(datetime_to_ordinal),\n });\n }\n }\n }\n if !batch.is_empty() {\n yield batch;\n }\n }\n if resp.is_truncated == Some(true) {\n continuation_token = resp.next_continuation_token.clone().map(|s| s.to_string());\n } else {\n break;\n }\n }\n }.boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let resp = self\n .client\n .get_object()\n .bucket(&self.bucket_name)\n .key(key_str.as_ref())\n .send()\n .await;\n let obj = match resp {\n Err(e) if e.as_service_error().is_some_and(|e| e.is_no_such_key()) => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n 
ordinal: Some(Ordinal::unavailable()),\n });\n }\n r => r?,\n };\n let ordinal = if options.include_ordinal {\n obj.last_modified().map(datetime_to_ordinal)\n } else {\n None\n };\n let value = if options.include_value {\n let bytes = obj.body.collect().await?.into_bytes();\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes.to_vec())\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let sqs_context = if let Some(sqs_context) = &self.sqs_context {\n sqs_context\n } else {\n return Ok(None);\n };\n let stream = stream! {\n loop {\n match self.poll_sqs(sqs_context).await {\n Ok(messages) => {\n for message in messages {\n yield Ok(message);\n }\n }\n Err(e) => {\n yield Err(e);\n }\n };\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventNotification {\n #[serde(default, rename = \"Records\")]\n pub records: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventRecord {\n #[serde(rename = \"eventName\")]\n pub event_name: String,\n pub s3: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Entity {\n pub bucket: S3Bucket,\n pub object: S3Object,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Bucket {\n pub name: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Object {\n pub key: String,\n}\n\nimpl Executor {\n ", "suffix_code": "\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AmazonS3\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n 
));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n let config = aws_config::load_defaults(BehaviorVersion::latest()).await;\n Ok(Box::new(Executor {\n client: Client::new(&config),\n bucket_name: spec.bucket_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n sqs_context: spec.sqs_queue_url.map(|url| {\n Arc::new(SqsContext {\n client: aws_sdk_sqs::Client::new(&config),\n queue_url: url,\n })\n }),\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n", "middle_code": "async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n .is_none_or(|prefix| 
s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/cocoindex/src/ops/sources/azure_blob.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse azure_core::prelude::NextMarker;\nuse azure_identity::{DefaultAzureCredential, TokenCredentialOptions};\nuse azure_storage::StorageCredentials;\nuse azure_storage_blobs::prelude::*;\nuse futures::StreamExt;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n account_name: String,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n\n /// SAS token for authentication. Takes precedence over account_access_key.\n sas_token: Option>,\n /// Account access key for authentication. 
If not provided, will use default Azure credential.\n account_access_key: Option>,\n}\n\nstruct Executor {\n client: BlobServiceClient,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &time::OffsetDateTime) -> Ordinal {\n Ordinal(Some(dt.unix_timestamp_nanos() as i64 / 1000))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token: Option = None;\n loop {\n let mut list_builder = self.client\n .container_client(&self.container_name)\n .list_blobs();\n\n if let Some(p) = &self.prefix {\n list_builder = list_builder.prefix(p.clone());\n }\n\n if let Some(token) = continuation_token.take() {\n list_builder = list_builder.marker(token);\n }\n\n let mut page_stream = list_builder.into_stream();\n let Some(page_result) = page_stream.next().await else {\n break;\n };\n\n let page = page_result?;\n let mut batch = Vec::new();\n\n for blob in page.blobs.blobs() {\n let key = &blob.name;\n\n // Only include files (not directories)\n if key.ends_with('/') { continue; }\n\n if self.is_file_included(key) {\n let ordinal = Some(datetime_to_ordinal(&blob.properties.last_modified));\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.clone().into()),\n ordinal,\n });\n }\n }\n\n if !batch.is_empty() {\n yield batch;\n }\n\n continuation_token = page.next_marker;\n if continuation_token.is_none() {\n break;\n }\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: 
&SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n\n let blob_client = self\n .client\n .container_client(&self.container_name)\n .blob_client(key_str.as_ref());\n\n let mut stream = blob_client.get().into_stream();\n let result = stream.next().await;\n\n let blob_response = match result {\n Some(response) => response?,\n None => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n\n let ordinal = if options.include_ordinal {\n Some(datetime_to_ordinal(\n &blob_response.blob.properties.last_modified,\n ))\n } else {\n None\n };\n\n let value = if options.include_value {\n let bytes = blob_response.data.collect().await?;\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes)\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n // Azure Blob Storage doesn't have built-in change notifications like S3+SQS\n Ok(None)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AzureBlob\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n 
serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n context: Arc,\n ) -> Result> {\n let credential = if let Some(sas_token) = spec.sas_token {\n let sas_token = context.auth_registry.get(&sas_token)?;\n StorageCredentials::sas_token(sas_token)?\n } else if let Some(account_access_key) = spec.account_access_key {\n let account_access_key = context.auth_registry.get(&account_access_key)?;\n StorageCredentials::access_key(spec.account_name.clone(), account_access_key)\n } else {\n let default_credential = Arc::new(DefaultAzureCredential::create(\n TokenCredentialOptions::default(),\n )?);\n StorageCredentials::token_credential(default_credential)\n };\n\n let client = BlobServiceClient::new(&spec.account_name, credential);\n Ok(Box::new(Executor {\n client,\n container_name: spec.container_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/ops/sources/local_file.rs", "use async_stream::try_stream;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse log::warn;\nuse std::borrow::Cow;\nuse std::path::Path;\nuse std::{path::PathBuf, sync::Arc};\n\nuse crate::base::field_attrs;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n path: String,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n}\n\nstruct Executor {\n root_path: PathBuf,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, path: impl 
AsRef + Copy) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(path))\n }\n\n fn is_file_included(&self, path: impl AsRef + Copy) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(path))\n && !self.is_excluded(path)\n }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let root_component_size = self.root_path.components().count();\n let mut dirs = Vec::new();\n dirs.push(Cow::Borrowed(&self.root_path));\n let mut new_dirs = Vec::new();\n try_stream! {\n while let Some(dir) = dirs.pop() {\n let mut entries = tokio::fs::read_dir(dir.as_ref()).await?;\n while let Some(entry) = entries.next_entry().await? {\n let path = entry.path();\n let mut path_components = path.components();\n for _ in 0..root_component_size {\n path_components.next();\n }\n let relative_path = path_components.as_path();\n if path.is_dir() {\n if !self.is_excluded(relative_path) {\n new_dirs.push(Cow::Owned(path));\n }\n } else if self.is_file_included(relative_path) {\n let ordinal: Option = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n if let Some(relative_path) = relative_path.to_str() {\n yield vec![PartialSourceRowMetadata {\n key: KeyValue::Str(relative_path.into()),\n ordinal,\n }];\n } else {\n warn!(\"Skipped ill-formed file path: {}\", path.display());\n }\n }\n }\n dirs.extend(new_dirs.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n if !self.is_file_included(key.str_value()?.as_ref()) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let path = self.root_path.join(key.str_value()?.as_ref());\n let ordinal = if options.include_ordinal {\n 
Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n let value = if options.include_value {\n match std::fs::read(path) {\n Ok(content) => {\n let content = if self.binary {\n fields_value!(content)\n } else {\n fields_value!(String::from_utf8_lossy(&content).to_string())\n };\n Some(SourceValue::Existence(content))\n }\n Err(e) if e.kind() == std::io::ErrorKind::NotFound => {\n Some(SourceValue::NonExistence)\n }\n Err(e) => Err(e)?,\n }\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"LocalFile\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor {\n root_path: PathBuf::from(spec.path),\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/ops/sources/google_drive.rs", 
"use chrono::Duration;\nuse google_drive3::{\n DriveHub,\n api::{File, Scope},\n yup_oauth2::{ServiceAccountAuthenticator, read_service_account_key},\n};\nuse http_body_util::BodyExt;\nuse hyper_rustls::HttpsConnector;\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse phf::phf_map;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\nstruct ExportMimeType {\n text: &'static str,\n binary: &'static str,\n}\n\nconst FOLDER_MIME_TYPE: &str = \"application/vnd.google-apps.folder\";\nconst FILE_MIME_TYPE: &str = \"application/vnd.google-apps.file\";\nstatic EXPORT_MIME_TYPES: phf::Map<&'static str, ExportMimeType> = phf_map! {\n \"application/vnd.google-apps.document\" =>\n ExportMimeType {\n text: \"text/markdown\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.spreadsheet\" =>\n ExportMimeType {\n text: \"text/csv\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.presentation\" =>\n ExportMimeType {\n text: \"text/plain\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.drawing\" =>\n ExportMimeType {\n text: \"image/svg+xml\",\n binary: \"image/png\",\n },\n \"application/vnd.google-apps.script\" =>\n ExportMimeType {\n text: \"application/vnd.google-apps.script+json\",\n binary: \"application/vnd.google-apps.script+json\",\n },\n};\n\nfn is_supported_file_type(mime_type: &str) -> bool {\n !mime_type.starts_with(\"application/vnd.google-apps.\")\n || EXPORT_MIME_TYPES.contains_key(mime_type)\n || mime_type == FILE_MIME_TYPE\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n service_account_credential_path: String,\n binary: bool,\n root_folder_ids: Vec,\n recent_changes_poll_interval: Option,\n}\n\nstruct Executor {\n drive_hub: DriveHub>,\n binary: bool,\n root_folder_ids: IndexSet>,\n recent_updates_poll_interval: Option,\n}\n\nimpl Executor {\n async fn new(spec: Spec) -> Result {\n let service_account_key =\n 
read_service_account_key(spec.service_account_credential_path).await?;\n let auth = ServiceAccountAuthenticator::builder(service_account_key)\n .build()\n .await?;\n let client =\n hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())\n .build(\n hyper_rustls::HttpsConnectorBuilder::new()\n .with_provider_and_native_roots(\n rustls::crypto::aws_lc_rs::default_provider(),\n )?\n .https_only()\n .enable_http2()\n .build(),\n );\n let drive_hub = DriveHub::new(client, auth);\n Ok(Self {\n drive_hub,\n binary: spec.binary,\n root_folder_ids: spec.root_folder_ids.into_iter().map(Arc::from).collect(),\n recent_updates_poll_interval: spec.recent_changes_poll_interval,\n })\n }\n}\n\nfn escape_string(s: &str) -> String {\n let mut escaped = String::with_capacity(s.len());\n for c in s.chars() {\n match c {\n '\\'' | '\\\\' => escaped.push('\\\\'),\n _ => {}\n }\n escaped.push(c);\n }\n escaped\n}\n\nconst CUTOFF_TIME_BUFFER: Duration = Duration::seconds(1);\nimpl Executor {\n fn visit_file(\n &self,\n file: File,\n new_folder_ids: &mut Vec>,\n seen_ids: &mut HashSet>,\n ) -> Result> {\n if file.trashed == Some(true) {\n return Ok(None);\n }\n let (id, mime_type) = match (file.id, file.mime_type) {\n (Some(id), Some(mime_type)) => (Arc::::from(id), mime_type),\n (id, mime_type) => {\n warn!(\"Skipping file with incomplete metadata: id={id:?}, mime_type={mime_type:?}\",);\n return Ok(None);\n }\n };\n if !seen_ids.insert(id.clone()) {\n return Ok(None);\n }\n let result = if mime_type == FOLDER_MIME_TYPE {\n new_folder_ids.push(id);\n None\n } else if is_supported_file_type(&mime_type) {\n Some(PartialSourceRowMetadata {\n key: KeyValue::Str(id),\n ordinal: file.modified_time.map(|t| t.try_into()).transpose()?,\n })\n } else {\n None\n };\n Ok(result)\n }\n\n async fn list_files(\n &self,\n folder_id: &str,\n fields: &str,\n next_page_token: &mut Option,\n ) -> Result> {\n let query = format!(\"'{}' in parents\", escape_string(folder_id));\n let 
mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .q(&query)\n .param(\"fields\", fields);\n if let Some(next_page_token) = &next_page_token {\n list_call = list_call.page_token(next_page_token);\n }\n let (_, files) = list_call.doit().await?;\n *next_page_token = files.next_page_token;\n let file_iter = files.files.into_iter().flat_map(|file| file.into_iter());\n Ok(file_iter)\n }\n\n fn make_cutoff_time(\n most_recent_modified_time: Option>,\n list_start_time: DateTime,\n ) -> DateTime {\n let safe_upperbound = list_start_time - CUTOFF_TIME_BUFFER;\n most_recent_modified_time\n .map(|t| t.min(safe_upperbound))\n .unwrap_or(safe_upperbound)\n }\n\n async fn get_recent_updates(\n &self,\n cutoff_time: &mut DateTime,\n ) -> Result {\n let mut page_size: i32 = 10;\n let mut next_page_token: Option = None;\n let mut changes = Vec::new();\n let mut most_recent_modified_time = None;\n let start_time = Utc::now();\n 'paginate: loop {\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"files(id,modifiedTime,parents,trashed)\")\n .order_by(\"modifiedTime desc\")\n .page_size(page_size);\n if let Some(token) = next_page_token {\n list_call = list_call.page_token(token.as_str());\n }\n let (_, files) = list_call.doit().await?;\n for file in files.files.into_iter().flat_map(|files| files.into_iter()) {\n let modified_time = file.modified_time.unwrap_or_default();\n if most_recent_modified_time.is_none() {\n most_recent_modified_time = Some(modified_time);\n }\n if modified_time <= *cutoff_time {\n break 'paginate;\n }\n let file_id = file.id.ok_or_else(|| anyhow!(\"File has no id\"))?;\n if self.is_file_covered(&file_id).await? 
{\n changes.push(SourceChange {\n key: KeyValue::Str(Arc::from(file_id)),\n data: None,\n });\n }\n }\n if let Some(token) = files.next_page_token {\n next_page_token = Some(token);\n } else {\n break;\n }\n // List more in a page since 2nd.\n page_size = 100;\n }\n *cutoff_time = Self::make_cutoff_time(most_recent_modified_time, start_time);\n Ok(SourceChangeMessage {\n changes,\n ack_fn: None,\n })\n }\n\n async fn is_file_covered(&self, file_id: &str) -> Result {\n let mut next_file_id = Some(Cow::Borrowed(file_id));\n while let Some(file_id) = next_file_id {\n if self.root_folder_ids.contains(file_id.as_ref()) {\n return Ok(true);\n }\n let (_, file) = self\n .drive_hub\n .files()\n .get(&file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"parents\")\n .doit()\n .await?;\n next_file_id = file\n .parents\n .into_iter()\n .flat_map(|parents| parents.into_iter())\n .map(Cow::Owned)\n .next();\n }\n Ok(false)\n }\n}\n\ntrait ResultExt {\n type OptResult;\n fn or_not_found(self) -> Self::OptResult;\n}\n\nimpl ResultExt for google_drive3::Result {\n type OptResult = google_drive3::Result>;\n\n fn or_not_found(self) -> Self::OptResult {\n match self {\n Ok(value) => Ok(Some(value)),\n Err(google_drive3::Error::BadRequest(err_msg))\n if err_msg\n .get(\"error\")\n .and_then(|e| e.get(\"code\"))\n .and_then(|code| code.as_i64())\n == Some(404) =>\n {\n Ok(None)\n }\n Err(e) => Err(e),\n }\n }\n}\n\nfn optional_modified_time(include_ordinal: bool) -> &'static str {\n if include_ordinal { \",modifiedTime\" } else { \"\" }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let mut seen_ids = HashSet::new();\n let mut folder_ids = self.root_folder_ids.clone();\n let fields = format!(\n \"files(id,name,mimeType,trashed{})\",\n optional_modified_time(options.include_ordinal)\n );\n let mut new_folder_ids = Vec::new();\n try_stream! 
{\n while let Some(folder_id) = folder_ids.pop() {\n let mut next_page_token = None;\n loop {\n let mut curr_rows = Vec::new();\n let files = self\n .list_files(&folder_id, &fields, &mut next_page_token)\n .await?;\n for file in files {\n curr_rows.extend(self.visit_file(file, &mut new_folder_ids, &mut seen_ids)?);\n }\n if !curr_rows.is_empty() {\n yield curr_rows;\n }\n if next_page_token.is_none() {\n break;\n }\n }\n folder_ids.extend(new_folder_ids.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let file_id = key.str_value()?;\n let fields = format!(\n \"id,name,mimeType,trashed{}\",\n optional_modified_time(options.include_ordinal)\n );\n let resp = self\n .drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", &fields)\n .doit()\n .await\n .or_not_found()?;\n let file = match resp {\n Some((_, file)) if file.trashed != Some(true) => file,\n _ => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n let ordinal = if options.include_ordinal {\n file.modified_time.map(|t| t.try_into()).transpose()?\n } else {\n None\n };\n let type_n_body = if let Some(export_mime_type) = file\n .mime_type\n .as_ref()\n .and_then(|mime_type| EXPORT_MIME_TYPES.get(mime_type.as_str()))\n {\n let target_mime_type = if self.binary {\n export_mime_type.binary\n } else {\n export_mime_type.text\n };\n self.drive_hub\n .files()\n .export(file_id, target_mime_type)\n .add_scope(Scope::Readonly)\n .doit()\n .await\n .or_not_found()?\n .map(|content| (Some(target_mime_type.to_string()), content.into_body()))\n } else {\n self.drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"alt\", \"media\")\n .doit()\n .await\n .or_not_found()?\n .map(|(resp, _)| (file.mime_type, resp.into_body()))\n };\n let value = match type_n_body {\n Some((mime_type, 
resp_body)) => {\n let content = resp_body.collect().await?;\n\n let fields = vec![\n file.name.unwrap_or_default().into(),\n mime_type.into(),\n if self.binary {\n content.to_bytes().to_vec().into()\n } else {\n String::from_utf8_lossy(&content.to_bytes())\n .to_string()\n .into()\n },\n ];\n Some(SourceValue::Existence(FieldValues { fields }))\n }\n None => None,\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let poll_interval = if let Some(poll_interval) = self.recent_updates_poll_interval {\n poll_interval\n } else {\n return Ok(None);\n };\n let mut cutoff_time = Utc::now() - CUTOFF_TIME_BUFFER;\n let mut interval = tokio::time::interval(poll_interval);\n interval.tick().await;\n let stream = stream! {\n loop {\n interval.tick().await;\n yield self.get_recent_updates(&mut cutoff_time).await;\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"GoogleDrive\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"file_id\",\n make_output_type(BasicValueType::Str),\n ));\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n let mime_type_field = schema_builder.add_field(FieldSchema::new(\n \"mime_type\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n )\n .with_attr(\n field_attrs::CONTENT_MIME_TYPE,\n 
serde_json::to_value(mime_type_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec).await?))\n }\n}\n"], ["/cocoindex/src/ops/targets/qdrant.rs", "use crate::ops::sdk::*;\nuse crate::prelude::*;\n\nuse std::fmt::Display;\n\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::setup;\nuse qdrant_client::Qdrant;\nuse qdrant_client::qdrant::{\n CreateCollectionBuilder, DeletePointsBuilder, DenseVector, Distance, MultiDenseVector,\n MultiVectorComparator, MultiVectorConfigBuilder, NamedVectors, PointId, PointStruct,\n PointsIdsList, UpsertPointsBuilder, Value as QdrantValue, Vector as QdrantVector,\n VectorParamsBuilder, VectorsConfigBuilder,\n};\n\nconst DEFAULT_VECTOR_SIMILARITY_METRIC: spec::VectorSimilarityMetric =\n spec::VectorSimilarityMetric::CosineSimilarity;\nconst DEFAULT_URL: &str = \"http://localhost:6334/\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n grpc_url: String,\n api_key: Option,\n}\n\n#[derive(Debug, Deserialize, Clone)]\nstruct Spec {\n connection: Option>,\n collection_name: String,\n}\n\n////////////////////////////////////////////////////////////\n// Common\n////////////////////////////////////////////////////////////\n\nstruct FieldInfo {\n field_schema: schema::FieldSchema,\n vector_shape: Option,\n}\n\nenum VectorShape {\n Vector(usize),\n MultiVector(usize),\n}\n\nimpl VectorShape {\n fn vector_size(&self) -> usize {\n match self {\n VectorShape::Vector(size) => *size,\n VectorShape::MultiVector(size) => *size,\n }\n }\n\n fn multi_vector_comparator(&self) -> Option {\n match self {\n VectorShape::MultiVector(_) => Some(MultiVectorComparator::MaxSim),\n _ => None,\n }\n 
}\n}\n\nfn parse_vector_schema_shape(vector_schema: &schema::VectorTypeSchema) -> Option {\n match &*vector_schema.element_type {\n schema::BasicValueType::Float32\n | schema::BasicValueType::Float64\n | schema::BasicValueType::Int64 => vector_schema.dimension.map(VectorShape::Vector),\n\n schema::BasicValueType::Vector(nested_vector_schema) => {\n match parse_vector_schema_shape(nested_vector_schema) {\n Some(VectorShape::Vector(dim)) => Some(VectorShape::MultiVector(dim)),\n _ => None,\n }\n }\n _ => None,\n }\n}\n\nfn parse_vector_shape(typ: &schema::ValueType) -> Option {\n match typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(vector_schema)) => {\n parse_vector_schema_shape(vector_schema)\n }\n _ => None,\n }\n}\n\nfn encode_dense_vector(v: &BasicValue) -> Result {\n let vec = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(|elem| {\n Ok(match elem {\n BasicValue::Float32(f) => *f,\n BasicValue::Float64(f) => *f as f32,\n BasicValue::Int64(i) => *i as f32,\n _ => bail!(\"Unsupported vector type: {:?}\", elem.kind()),\n })\n })\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vec.into())\n}\n\nfn encode_multi_dense_vector(v: &BasicValue) -> Result {\n let vecs = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(encode_dense_vector)\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vecs.into())\n}\n\nfn embedding_metric_to_qdrant(metric: spec::VectorSimilarityMetric) -> Result {\n Ok(match metric {\n spec::VectorSimilarityMetric::CosineSimilarity => Distance::Cosine,\n spec::VectorSimilarityMetric::L2Distance => Distance::Euclid,\n spec::VectorSimilarityMetric::InnerProduct => Distance::Dot,\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionKey {\n connection: Option>,\n 
collection_name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nstruct VectorDef {\n vector_size: usize,\n metric: spec::VectorSimilarityMetric,\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n multi_vector_comparator: Option,\n}\n#[derive(Debug, Clone, Serialize, Deserialize)]\nstruct SetupState {\n #[serde(default)]\n vectors: BTreeMap,\n\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n unsupported_vector_fields: Vec<(String, ValueType)>,\n}\n\n#[derive(Debug)]\nstruct SetupStatus {\n delete_collection: bool,\n add_collection: Option,\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if self.delete_collection {\n result.push(setup::ChangeDescription::Action(\n \"Delete collection\".to_string(),\n ));\n }\n if let Some(add_collection) = &self.add_collection {\n let vector_descriptions = add_collection\n .vectors\n .iter()\n .map(|(name, vector_def)| {\n format!(\n \"{}[{}], {}\",\n name, vector_def.vector_size, vector_def.metric\n )\n })\n .collect::>()\n .join(\"; \");\n result.push(setup::ChangeDescription::Action(format!(\n \"Create collection{}\",\n if vector_descriptions.is_empty() {\n \"\".to_string()\n } else {\n format!(\" with vectors: {vector_descriptions}\")\n }\n )));\n for (name, schema) in add_collection.unsupported_vector_fields.iter() {\n result.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by Qdrant. 
It will be stored in payload.\",\n name, schema\n )));\n }\n }\n result\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n match (self.delete_collection, self.add_collection.is_some()) {\n (false, false) => setup::SetupChangeType::NoChange,\n (false, true) => setup::SetupChangeType::Create,\n (true, false) => setup::SetupChangeType::Delete,\n (true, true) => setup::SetupChangeType::Update,\n }\n }\n}\n\nimpl SetupStatus {\n async fn apply_delete(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if self.delete_collection {\n qdrant_client.delete_collection(collection_name).await?;\n }\n Ok(())\n }\n\n async fn apply_create(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if let Some(add_collection) = &self.add_collection {\n let mut builder = CreateCollectionBuilder::new(collection_name);\n if !add_collection.vectors.is_empty() {\n let mut vectors_config = VectorsConfigBuilder::default();\n for (name, vector_def) in add_collection.vectors.iter() {\n let mut params = VectorParamsBuilder::new(\n vector_def.vector_size as u64,\n embedding_metric_to_qdrant(vector_def.metric)?,\n );\n if let Some(multi_vector_comparator) = &vector_def.multi_vector_comparator {\n params = params.multivector_config(MultiVectorConfigBuilder::new(\n MultiVectorComparator::from_str_name(multi_vector_comparator)\n .ok_or_else(|| {\n anyhow!(\n \"unrecognized multi vector comparator: {}\",\n multi_vector_comparator\n )\n })?,\n ));\n }\n vectors_config.add_named_vector_params(name, params);\n }\n builder = builder.vectors_config(vectors_config);\n }\n qdrant_client.create_collection(builder).await?;\n }\n Ok(())\n }\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n qdrant_client: Arc,\n collection_name: String,\n fields_info: Vec,\n}\n\nimpl ExportContext {\n async fn apply_mutation(&self, mutation: 
ExportTargetMutation) -> Result<()> {\n let mut points: Vec = Vec::with_capacity(mutation.upserts.len());\n for upsert in mutation.upserts.iter() {\n let point_id = key_to_point_id(&upsert.key)?;\n let (payload, vectors) = values_to_payload(&upsert.value.fields, &self.fields_info)?;\n\n points.push(PointStruct::new(point_id, vectors, payload));\n }\n\n if !points.is_empty() {\n self.qdrant_client\n .upsert_points(UpsertPointsBuilder::new(&self.collection_name, points).wait(true))\n .await?;\n }\n\n let ids = mutation\n .deletes\n .iter()\n .map(|deletion| key_to_point_id(&deletion.key))\n .collect::>>()?;\n\n if !ids.is_empty() {\n self.qdrant_client\n .delete_points(\n DeletePointsBuilder::new(&self.collection_name)\n .points(PointsIdsList { ids })\n .wait(true),\n )\n .await?;\n }\n\n Ok(())\n }\n}\nfn key_to_point_id(key_value: &KeyValue) -> Result {\n let point_id = match key_value {\n KeyValue::Str(v) => PointId::from(v.to_string()),\n KeyValue::Int64(v) => PointId::from(*v as u64),\n KeyValue::Uuid(v) => PointId::from(v.to_string()),\n e => bail!(\"Invalid Qdrant point ID: {e}\"),\n };\n\n Ok(point_id)\n}\n\nfn values_to_payload(\n value_fields: &[Value],\n fields_info: &[FieldInfo],\n) -> Result<(HashMap, NamedVectors)> {\n let mut payload = HashMap::with_capacity(value_fields.len());\n let mut vectors = NamedVectors::default();\n\n for (value, field_info) in value_fields.iter().zip(fields_info.iter()) {\n let field_name = &field_info.field_schema.name;\n\n match &field_info.vector_shape {\n Some(vector_shape) => {\n if value.is_null() {\n continue;\n }\n let vector: QdrantVector = match value {\n Value::Basic(basic_value) => match vector_shape {\n VectorShape::Vector(_) => encode_dense_vector(&basic_value)?.into(),\n VectorShape::MultiVector(_) => {\n encode_multi_dense_vector(&basic_value)?.into()\n }\n },\n _ => {\n bail!(\"Expected a vector field, got {:?}\", value);\n }\n };\n vectors = vectors.add_vector(field_name.clone(), vector);\n }\n None => {\n 
let json_value = serde_json::to_value(TypedValue {\n t: &field_info.field_schema.value_type.typ,\n v: value,\n })?;\n payload.insert(field_name.clone(), json_value.into());\n }\n }\n }\n\n Ok((payload, vectors))\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\n#[derive(Default)]\nstruct Factory {\n qdrant_clients: Mutex>, Arc>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionId {\n collection_name: String,\n}\n\nimpl Display for CollectionId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.collection_name)?;\n Ok(())\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = CollectionKey;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Qdrant\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(CollectionKey, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n if d.key_fields_schema.len() != 1 {\n api_bail!(\n \"Expected one primary key field for the point ID. 
Got {}.\",\n d.key_fields_schema.len()\n )\n }\n\n let mut fields_info = Vec::::new();\n let mut vector_def = BTreeMap::::new();\n let mut unsupported_vector_fields = Vec::<(String, ValueType)>::new();\n\n for field in d.value_fields_schema.iter() {\n let vector_shape = parse_vector_shape(&field.value_type.typ);\n if let Some(vector_shape) = &vector_shape {\n vector_def.insert(\n field.name.clone(),\n VectorDef {\n vector_size: vector_shape.vector_size(),\n metric: DEFAULT_VECTOR_SIMILARITY_METRIC,\n multi_vector_comparator: vector_shape.multi_vector_comparator().map(|s| s.as_str_name().to_string()),\n },\n );\n } else if matches!(\n &field.value_type.typ,\n schema::ValueType::Basic(schema::BasicValueType::Vector(_))\n ) {\n // This is a vector field but not supported by Qdrant\n unsupported_vector_fields.push((field.name.clone(), field.value_type.typ.clone()));\n }\n fields_info.push(FieldInfo {\n field_schema: field.clone(),\n vector_shape,\n });\n }\n\n let mut specified_vector_fields = HashSet::new();\n for vector_index in d.index_options.vector_indexes {\n match vector_def.get_mut(&vector_index.field_name) {\n Some(vector_def) => {\n if specified_vector_fields.insert(vector_index.field_name.clone()) {\n // Validate the metric is supported by Qdrant\n embedding_metric_to_qdrant(vector_index.metric)\n .with_context(||\n format!(\"Parsing vector index metric {} for field `{}`\", vector_index.metric, vector_index.field_name))?;\n vector_def.metric = vector_index.metric;\n } else {\n api_bail!(\"Field `{}` specified more than once in vector index definition\", vector_index.field_name);\n }\n }\n None => {\n if let Some(field) = d.value_fields_schema.iter().find(|f| f.name == vector_index.field_name) {\n api_bail!(\n \"Field `{}` specified in vector index is expected to be a number vector with fixed size, actual type: {}\",\n vector_index.field_name, field.value_type.typ\n );\n } else {\n api_bail!(\"Field `{}` specified in vector index is not found\", 
vector_index.field_name);\n }\n }\n }\n }\n\n let export_context = Arc::new(ExportContext {\n qdrant_client: self\n .get_qdrant_client(&d.spec.connection, &context.auth_registry)?,\n collection_name: d.spec.collection_name.clone(),\n fields_info,\n });\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: Box::pin(async move { Ok(export_context) }),\n setup_key: CollectionKey {\n connection: d.spec.connection,\n collection_name: d.spec.collection_name,\n },\n desired_setup_state: SetupState {\n vectors: vector_def,\n unsupported_vector_fields,\n },\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(match key {\n serde_json::Value::String(s) => {\n // For backward compatibility.\n CollectionKey {\n collection_name: s,\n connection: None,\n }\n }\n _ => serde_json::from_value(key)?,\n })\n }\n\n async fn check_setup_status(\n &self,\n _key: CollectionKey,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let desired_exists = desired.is_some();\n let add_collection = desired.filter(|state| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vectors != state.vectors)\n });\n let delete_collection = existing.possible_versions().next().is_some()\n && (!desired_exists || add_collection.is_some());\n Ok(SetupStatus {\n delete_collection,\n add_collection,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(if desired.vectors == existing.vectors {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n })\n }\n\n fn describe_resource(&self, key: &CollectionKey) -> Result {\n Ok(format!(\n \"Qdrant collection {}{}\",\n key.collection_name,\n key.connection\n .as_ref()\n .map_or_else(|| \"\".to_string(), |auth_entry| format!(\" @ {auth_entry}\"))\n ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: 
Vec>,\n ) -> Result<()> {\n for mutation_w_ctx in mutations.into_iter() {\n mutation_w_ctx\n .export_context\n .apply_mutation(mutation_w_ctx.mutation)\n .await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_delete(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_create(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n Ok(())\n }\n}\n\nimpl Factory {\n fn new() -> Self {\n Self {\n qdrant_clients: Mutex::new(HashMap::new()),\n }\n }\n\n fn get_qdrant_client(\n &self,\n auth_entry: &Option>,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let mut clients = self.qdrant_clients.lock().unwrap();\n if let Some(client) = clients.get(auth_entry) {\n return Ok(client.clone());\n }\n\n let spec = auth_entry.as_ref().map_or_else(\n || {\n Ok(ConnectionSpec {\n grpc_url: DEFAULT_URL.to_string(),\n api_key: None,\n })\n },\n |auth_entry| auth_registry.get(auth_entry),\n )?;\n let client = Arc::new(\n Qdrant::from_url(&spec.grpc_url)\n .api_key(spec.api_key)\n .skip_compatibility_check()\n .build()?,\n );\n clients.insert(auth_entry.clone(), client.clone());\n Ok(client)\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory::new().register(registry)\n}\n"], ["/cocoindex/src/ops/targets/kuzu.rs", "use chrono::TimeDelta;\nuse serde_json::json;\n\nuse std::fmt::Write;\n\nuse super::shared::property_graph::GraphElementMapping;\nuse super::shared::property_graph::*;\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, 
check_table_compatibility,\n};\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::prelude::*;\n\nuse crate::setup::SetupChangeType;\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n /// The URL of the [Kuzu API server](https://kuzu.com/docs/api/server/overview),\n /// e.g. `http://localhost:8000`.\n api_server_url: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\n////////////////////////////////////////////////////////////\n// Utils to deal with Kuzu\n////////////////////////////////////////////////////////////\n\nstruct CypherBuilder {\n query: String,\n}\n\nimpl CypherBuilder {\n fn new() -> Self {\n Self {\n query: String::new(),\n }\n }\n\n fn query_mut(&mut self) -> &mut String {\n &mut self.query\n }\n}\n\nstruct KuzuThinClient {\n reqwest_client: reqwest::Client,\n query_url: String,\n}\n\nimpl KuzuThinClient {\n fn new(conn_spec: &ConnectionSpec, reqwest_client: reqwest::Client) -> Self {\n Self {\n reqwest_client,\n query_url: format!(\"{}/cypher\", conn_spec.api_server_url.trim_end_matches('/')),\n }\n }\n\n async fn run_cypher(&self, cyper_builder: CypherBuilder) -> Result<()> {\n if cyper_builder.query.is_empty() {\n return Ok(());\n }\n let query = json!({\n \"query\": cyper_builder.query\n });\n let response = self\n .reqwest_client\n .post(&self.query_url)\n .json(&query)\n .send()\n .await?;\n if !response.status().is_success() {\n return Err(anyhow::anyhow!(\n \"Failed to run cypher: {}\",\n response.text().await?\n ));\n 
}\n Ok(())\n }\n}\n\nfn kuzu_table_type(elem_type: &ElementType) -> &'static str {\n match elem_type {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"REL\",\n }\n}\n\nfn basic_type_to_kuzu(basic_type: &BasicValueType) -> Result {\n Ok(match basic_type {\n BasicValueType::Bytes => \"BLOB\".to_string(),\n BasicValueType::Str => \"STRING\".to_string(),\n BasicValueType::Bool => \"BOOL\".to_string(),\n BasicValueType::Int64 => \"INT64\".to_string(),\n BasicValueType::Float32 => \"FLOAT\".to_string(),\n BasicValueType::Float64 => \"DOUBLE\".to_string(),\n BasicValueType::Range => \"UINT64[2]\".to_string(),\n BasicValueType::Uuid => \"UUID\".to_string(),\n BasicValueType::Date => \"DATE\".to_string(),\n BasicValueType::LocalDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::OffsetDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::TimeDelta => \"INTERVAL\".to_string(),\n BasicValueType::Vector(t) => format!(\n \"{}[{}]\",\n basic_type_to_kuzu(&t.element_type)?,\n t.dimension\n .map_or_else(|| \"\".to_string(), |d| d.to_string())\n ),\n t @ (BasicValueType::Union(_) | BasicValueType::Time | BasicValueType::Json) => {\n api_bail!(\"{t} is not supported in Kuzu\")\n }\n })\n}\n\nfn struct_schema_to_kuzu(struct_schema: &StructSchema) -> Result {\n Ok(format!(\n \"STRUCT({})\",\n struct_schema\n .fields\n .iter()\n .map(|f| Ok(format!(\n \"{} {}\",\n f.name,\n value_type_to_kuzu(&f.value_type.typ)?\n )))\n .collect::>>()?\n .join(\", \")\n ))\n}\n\nfn value_type_to_kuzu(value_type: &ValueType) -> Result {\n Ok(match value_type {\n ValueType::Basic(basic_type) => basic_type_to_kuzu(basic_type)?,\n ValueType::Struct(struct_type) => struct_schema_to_kuzu(struct_type)?,\n ValueType::Table(table_type) => format!(\"{}[]\", struct_schema_to_kuzu(&table_type.row)?),\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Serialize, 
Deserialize, Clone, PartialEq, Eq)]\nstruct ReferencedNodeTable {\n table_name: String,\n\n #[serde(with = \"indexmap::map::serde_seq\")]\n key_columns: IndexMap,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct SetupState {\n schema: TableColumnsSchema,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n referenced_node_tables: Option<(ReferencedNodeTable, ReferencedNodeTable)>,\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Borrowed(&val.schema)\n }\n}\n\n#[derive(Debug)]\nstruct GraphElementDataSetupStatus {\n actions: TableMainSetupAction,\n referenced_node_tables: Option<(String, String)>,\n drop_affected_referenced_node_tables: IndexSet,\n}\n\nimpl setup::ResourceSetupStatus for GraphElementDataSetupStatus {\n fn describe_changes(&self) -> Vec {\n self.actions.describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.actions.change_type(false)\n }\n}\n\nfn append_drop_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n if !setup_status.actions.drop_existing {\n return Ok(());\n }\n writeln!(\n cypher.query_mut(),\n \"DROP TABLE IF EXISTS {};\",\n elem_type.label()\n )?;\n Ok(())\n}\n\nfn append_delete_orphaned_nodes(cypher: &mut CypherBuilder, node_table: &str) -> Result<()> {\n writeln!(\n cypher.query_mut(),\n \"MATCH (n:{node_table}) WITH n WHERE NOT (n)--() DELETE n;\"\n )?;\n Ok(())\n}\n\nfn append_upsert_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n let table_upsertion = if let Some(table_upsertion) = &setup_status.actions.table_upsertion {\n table_upsertion\n } else {\n return Ok(());\n };\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n write!(\n cypher.query_mut(),\n \"CREATE {kuzu_table_type} TABLE IF NOT EXISTS {table_name} (\",\n 
kuzu_table_type = kuzu_table_type(elem_type),\n table_name = elem_type.label(),\n )?;\n if let Some((src, tgt)) = &setup_status.referenced_node_tables {\n write!(cypher.query_mut(), \"FROM {src} TO {tgt}, \")?;\n }\n cypher.query_mut().push_str(\n keys.iter()\n .chain(values.iter())\n .map(|(name, kuzu_type)| format!(\"{name} {kuzu_type}\"))\n .join(\", \")\n .as_str(),\n );\n match elem_type {\n ElementType::Node(_) => {\n write!(\n cypher.query_mut(),\n \", {SELF_CONTAINED_TAG_FIELD_NAME} BOOL, PRIMARY KEY ({})\",\n keys.iter().map(|(name, _)| name).join(\", \")\n )?;\n }\n ElementType::Relationship(_) => {}\n }\n write!(cypher.query_mut(), \");\\n\\n\")?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n let table_name = elem_type.label();\n for name in columns_to_delete\n .iter()\n .chain(columns_to_upsert.iter().map(|(name, _)| name))\n {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} DROP IF EXISTS {name};\"\n )?;\n }\n for (name, kuzu_type) in columns_to_upsert.iter() {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} ADD {name} {kuzu_type};\",\n )?;\n }\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Utils to convert value to Kuzu literals\n////////////////////////////////////////////////////////////\n\nfn append_string_literal(cypher: &mut CypherBuilder, s: &str) -> Result<()> {\n let out = cypher.query_mut();\n out.push('\"');\n for c in s.chars() {\n match c {\n '\\\\' => out.push_str(\"\\\\\\\\\"),\n '\"' => out.push_str(\"\\\\\\\"\"),\n // Control characters (0x00..=0x1F)\n c if (c as u32) < 0x20 => write!(out, \"\\\\u{:04X}\", c as u32)?,\n // BMP Unicode\n c if (c as u32) <= 0xFFFF => out.push(c),\n // Non-BMP Unicode: Encode as surrogate pairs for Cypher \\uXXXX\\uXXXX\n c => {\n let code = c as u32;\n let high = 0xD800 + ((code - 0x10000) >> 10);\n let low = 0xDC00 + ((code - 0x10000) & 0x3FF);\n write!(out, 
\"\\\\u{high:04X}\\\\u{low:04X}\")?;\n }\n }\n }\n out.push('\"');\n Ok(())\n}\n\nfn append_basic_value(cypher: &mut CypherBuilder, basic_value: &BasicValue) -> Result<()> {\n match basic_value {\n BasicValue::Bytes(bytes) => {\n write!(cypher.query_mut(), \"BLOB(\")?;\n for byte in bytes {\n write!(cypher.query_mut(), \"\\\\\\\\x{byte:02X}\")?;\n }\n write!(cypher.query_mut(), \")\")?;\n }\n BasicValue::Str(s) => {\n append_string_literal(cypher, s)?;\n }\n BasicValue::Bool(b) => {\n write!(cypher.query_mut(), \"{b}\")?;\n }\n BasicValue::Int64(i) => {\n write!(cypher.query_mut(), \"{i}\")?;\n }\n BasicValue::Float32(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Float64(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Range(r) => {\n write!(cypher.query_mut(), \"[{}, {}]\", r.start, r.end)?;\n }\n BasicValue::Uuid(u) => {\n write!(cypher.query_mut(), \"UUID(\\\"{u}\\\")\")?;\n }\n BasicValue::Date(d) => {\n write!(cypher.query_mut(), \"DATE(\\\"{d}\\\")\")?;\n }\n BasicValue::LocalDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::OffsetDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::TimeDelta(td) => {\n let num_days = td.num_days();\n let sub_day_duration = *td - TimeDelta::days(num_days);\n write!(cypher.query_mut(), \"INTERVAL(\\\"\")?;\n if num_days != 0 {\n write!(cypher.query_mut(), \"{num_days} days \")?;\n }\n let microseconds = sub_day_duration\n .num_microseconds()\n .ok_or_else(invariance_violation)?;\n write!(cypher.query_mut(), \"{microseconds} microseconds\\\")\")?;\n }\n BasicValue::Vector(v) => {\n write!(cypher.query_mut(), \"[\")?;\n let mut prefix = \"\";\n for elem in v.iter() {\n cypher.query_mut().push_str(prefix);\n append_basic_value(cypher, elem)?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"]\")?;\n }\n v @ (BasicValue::UnionVariant { .. 
} | BasicValue::Time(_) | BasicValue::Json(_)) => {\n bail!(\"value types are not supported in Kuzu: {}\", v.kind());\n }\n }\n Ok(())\n}\n\nfn append_struct_fields<'a>(\n cypher: &'a mut CypherBuilder,\n field_schema: &[schema::FieldSchema],\n field_values: impl Iterator,\n) -> Result<()> {\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(field_schema.iter(), field_values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v)?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_value(\n cypher: &mut CypherBuilder,\n typ: &schema::ValueType,\n value: &value::Value,\n) -> Result<()> {\n match value {\n value::Value::Null => {\n write!(cypher.query_mut(), \"NULL\")?;\n }\n value::Value::Basic(basic_value) => append_basic_value(cypher, basic_value)?,\n value::Value::Struct(struct_value) => {\n let struct_schema = match typ {\n schema::ValueType::Struct(struct_schema) => struct_schema,\n _ => {\n api_bail!(\"Expected struct type, got {}\", typ);\n }\n };\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &struct_schema.fields, struct_value.fields.iter())?;\n cypher.query_mut().push('}');\n }\n value::Value::KTable(map) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n cypher.query_mut().push('[');\n let mut prefix = \"\";\n for (k, v) in map.iter() {\n let key_value = value::Value::from(k);\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(\n cypher,\n &row_schema.fields,\n std::iter::once(&key_value).chain(v.fields.iter()),\n )?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n value::Value::LTable(rows) | value::Value::UTable(rows) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n 
cypher.query_mut().push('[');\n let mut prefix = \"\";\n for v in rows.iter() {\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &row_schema.fields, v.fields.iter())?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n conn_ref: AuthEntryReference,\n kuzu_client: KuzuThinClient,\n analyzed_data_coll: AnalyzedDataCollection,\n}\n\nfn append_key_pattern<'a>(\n cypher: &'a mut CypherBuilder,\n key_fields: &'a [FieldSchema],\n values: impl Iterator>,\n) -> Result<()> {\n write!(cypher.query_mut(), \"{{\")?;\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(key_fields.iter(), values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v.as_ref())?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"}}\")?;\n Ok(())\n}\n\nfn append_set_value_fields(\n cypher: &mut CypherBuilder,\n var_name: &str,\n value_fields: &[FieldSchema],\n value_fields_idx: &[usize],\n upsert_entry: &ExportTargetUpsertEntry,\n set_self_contained_tag: bool,\n) -> Result<()> {\n let mut prefix = \" SET \";\n if set_self_contained_tag {\n write!(\n cypher.query_mut(),\n \"{prefix}{var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE\"\n )?;\n prefix = \", \";\n }\n for (value_field, value_idx) in std::iter::zip(value_fields.iter(), value_fields_idx.iter()) {\n let field_name = &value_field.name;\n write!(cypher.query_mut(), \"{prefix}{var_name}.{field_name}=\")?;\n append_value(\n cypher,\n &value_field.value_type.typ,\n &upsert_entry.value.fields[*value_idx],\n )?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_upsert_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const 
NODE_VAR_NAME: &str = \"n\";\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({NODE_VAR_NAME}:{label} \",\n label = data_coll.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n NODE_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n true,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_merge_node_for_rel(\n cypher: &mut CypherBuilder,\n var_name: &str,\n field_mapping: &AnalyzedGraphElementFieldMapping,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({var_name}:{label} \",\n label = field_mapping.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &field_mapping.schema.key_fields,\n field_mapping\n .fields_input_idx\n .key\n .iter()\n .map(|idx| Cow::Borrowed(&upsert_entry.value.fields[*idx])),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n var_name,\n &field_mapping.schema.value_fields,\n &field_mapping.fields_input_idx.value,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut())?;\n Ok(())\n}\n\nfn append_upsert_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n const SRC_NODE_VAR_NAME: &str = \"s\";\n const TGT_NODE_VAR_NAME: &str = \"t\";\n\n let rel_info = if let Some(rel_info) = &data_coll.rel {\n rel_info\n } else {\n return Ok(());\n };\n append_merge_node_for_rel(cypher, SRC_NODE_VAR_NAME, &rel_info.source, upsert_entry)?;\n append_merge_node_for_rel(cypher, TGT_NODE_VAR_NAME, &rel_info.target, upsert_entry)?;\n {\n let rel_type = data_coll.schema.elem_type.label();\n write!(\n cypher.query_mut(),\n 
\"MERGE ({SRC_NODE_VAR_NAME})-[{REL_VAR_NAME}:{rel_type} \"\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \"]->({TGT_NODE_VAR_NAME})\")?;\n }\n append_set_value_fields(\n cypher,\n REL_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = data_coll.schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n key.fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} SET {NODE_VAR_NAME}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\"\n )?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n src_node_key: &KeyValue,\n tgt_node_key: &KeyValue,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n let rel_type = data_coll.schema.elem_type.label();\n\n write!(\n cypher.query_mut(),\n \"MATCH (:{label} \",\n label = rel.source.schema.elem_type.label()\n )?;\n let src_key_schema = &rel.source.schema.key_fields;\n append_key_pattern(\n cypher,\n src_key_schema,\n src_node_key\n .fields_iter(src_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n 
)?;\n\n write!(cypher.query_mut(), \")-[{REL_VAR_NAME}:{rel_type} \")?;\n let key_schema = &data_coll.schema.key_fields;\n append_key_pattern(\n cypher,\n key_schema,\n key.fields_iter(key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n\n write!(\n cypher.query_mut(),\n \"]->(:{label} \",\n label = rel.target.schema.elem_type.label()\n )?;\n let tgt_key_schema = &rel.target.schema.key_fields;\n append_key_pattern(\n cypher,\n tgt_key_schema,\n tgt_node_key\n .fields_iter(tgt_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n write!(cypher.query_mut(), \") DELETE {REL_VAR_NAME}\")?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_maybe_gc_node(\n cypher: &mut CypherBuilder,\n schema: &GraphElementSchema,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &schema.key_fields,\n key.fields_iter(schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n write!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\ntype KuzuGraphElement = GraphElementType;\n\nstruct Factory {\n reqwest_client: reqwest::Client,\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = GraphElementDataSetupStatus;\n\n type Key = KuzuGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Kuzu\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) 
-> Result<(\n Vec>,\n Vec<(KuzuGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n fn to_kuzu_cols(fields: &[FieldSchema]) -> Result> {\n fields\n .iter()\n .map(|f| Ok((f.name.clone(), value_type_to_kuzu(&f.value_type.typ)?)))\n .collect::>>()\n }\n let data_coll_outputs: Vec> =\n std::iter::zip(data_collections, analyzed_data_colls.into_iter())\n .map(|(data_coll, analyzed)| {\n fn to_dep_table(\n field_mapping: &AnalyzedGraphElementFieldMapping,\n ) -> Result {\n Ok(ReferencedNodeTable {\n table_name: field_mapping.schema.elem_type.label().to_string(),\n key_columns: to_kuzu_cols(&field_mapping.schema.key_fields)?,\n })\n }\n let setup_key = KuzuGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&analyzed.schema.key_fields)?,\n value_columns: to_kuzu_cols(&analyzed.schema.value_fields)?,\n },\n referenced_node_tables: (analyzed.rel.as_ref())\n .map(|rel| {\n anyhow::Ok((to_dep_table(&rel.source)?, to_dep_table(&rel.target)?))\n })\n .transpose()?,\n };\n\n let export_context = ExportContext {\n conn_ref: data_coll.spec.connection.clone(),\n kuzu_client: KuzuThinClient::new(\n &context\n .auth_registry\n .get::(&data_coll.spec.connection)?,\n self.reqwest_client.clone(),\n ),\n analyzed_data_coll: analyzed,\n };\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: async move { Ok(Arc::new(export_context)) }.boxed(),\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>()?;\n let decl_output = 
std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&graph_elem_schema.key_fields)?,\n value_columns: to_kuzu_cols(&graph_elem_schema.value_fields)?,\n },\n referenced_node_tables: None,\n };\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>()?;\n Ok((data_coll_outputs, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n _key: KuzuGraphElement,\n desired: Option,\n existing: CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let existing_invalidated = desired.as_ref().is_some_and(|desired| {\n existing\n .possible_versions()\n .any(|v| v.referenced_node_tables != desired.referenced_node_tables)\n });\n let actions =\n TableMainSetupAction::from_states(desired.as_ref(), &existing, existing_invalidated);\n let drop_affected_referenced_node_tables = if actions.drop_existing {\n existing\n .possible_versions()\n .flat_map(|v| &v.referenced_node_tables)\n .flat_map(|(src, tgt)| [src.table_name.clone(), tgt.table_name.clone()].into_iter())\n .collect()\n } else {\n IndexSet::new()\n };\n Ok(GraphElementDataSetupStatus {\n actions,\n referenced_node_tables: desired\n .and_then(|desired| desired.referenced_node_tables)\n .map(|(src, tgt)| (src.table_name, tgt.table_name)),\n drop_affected_referenced_node_tables,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(\n if desired.referenced_node_tables != existing.referenced_node_tables {\n SetupStateCompatibility::NotCompatible\n } else {\n check_table_compatibility(&desired.schema, &existing.schema)\n },\n )\n }\n\n fn describe_resource(&self, key: &KuzuGraphElement) -> Result {\n Ok(format!(\n \"Kuzu {} TABLE {}\",\n kuzu_table_type(&key.typ),\n key.typ.label()\n ))\n }\n\n fn 
extract_additional_key(\n &self,\n _key: &KeyValue,\n value: &FieldValues,\n export_context: &ExportContext,\n ) -> Result {\n let additional_key = if let Some(rel_info) = &export_context.analyzed_data_coll.rel {\n serde_json::to_value((\n (rel_info.source.fields_input_idx).extract_key(&value.fields)?,\n (rel_info.target.fields_input_idx).extract_key(&value.fields)?,\n ))?\n } else {\n serde_json::Value::Null\n };\n Ok(additional_key)\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mutations_by_conn = IndexMap::new();\n for mutation in mutations.into_iter() {\n mutations_by_conn\n .entry(mutation.export_context.conn_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mutations in mutations_by_conn.into_values() {\n let kuzu_client = &mutations[0].export_context.kuzu_client;\n let mut cypher = CypherBuilder::new();\n writeln!(cypher.query_mut(), \"BEGIN TRANSACTION;\")?;\n\n let (mut rel_mutations, nodes_mutations): (Vec<_>, Vec<_>) = mutations\n .into_iter()\n .partition(|m| m.export_context.analyzed_data_coll.rel.is_some());\n\n struct NodeTableGcInfo {\n schema: Arc,\n keys: IndexSet,\n }\n fn register_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: KeyValue,\n ) {\n map.entry(schema.elem_type.clone())\n .or_insert_with(|| NodeTableGcInfo {\n schema: schema.clone(),\n keys: IndexSet::new(),\n })\n .keys\n .insert(key);\n }\n fn resolve_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: &KeyValue,\n ) {\n map.get_mut(&schema.elem_type)\n .map(|info| info.keys.shift_remove(key));\n }\n let mut gc_info = IndexMap::::new();\n\n // Deletes for relationships\n for rel_mutation in rel_mutations.iter_mut() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n for delete in rel_mutation.mutation.deletes.iter_mut() {\n let mut additional_keys = match delete.additional_key.take() {\n serde_json::Value::Array(keys) => 
keys,\n _ => return Err(invariance_violation()),\n };\n if additional_keys.len() != 2 {\n api_bail!(\n \"Expected additional key with 2 fields, got {}\",\n delete.additional_key\n );\n }\n let src_key = KeyValue::from_json(\n additional_keys[0].take(),\n &rel.source.schema.key_fields,\n )?;\n let tgt_key = KeyValue::from_json(\n additional_keys[1].take(),\n &rel.target.schema.key_fields,\n )?;\n append_delete_rel(&mut cypher, data_coll, &delete.key, &src_key, &tgt_key)?;\n register_gc_node(&mut gc_info, &rel.source.schema, src_key);\n register_gc_node(&mut gc_info, &rel.target.schema, tgt_key);\n }\n }\n\n for node_mutation in nodes_mutations.iter() {\n let data_coll = &node_mutation.export_context.analyzed_data_coll;\n // Deletes for nodes\n for delete in node_mutation.mutation.deletes.iter() {\n append_delete_node(&mut cypher, data_coll, &delete.key)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &delete.key);\n }\n\n // Upserts for nodes\n for upsert in node_mutation.mutation.upserts.iter() {\n append_upsert_node(&mut cypher, data_coll, upsert)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &upsert.key);\n }\n }\n // Upserts for relationships\n for rel_mutation in rel_mutations.iter() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n for upsert in rel_mutation.mutation.upserts.iter() {\n append_upsert_rel(&mut cypher, data_coll, upsert)?;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n resolve_gc_node(\n &mut gc_info,\n &rel.source.schema,\n &(rel.source.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n resolve_gc_node(\n &mut gc_info,\n &rel.target.schema,\n &(rel.target.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n }\n }\n\n // GC orphaned nodes\n for info in gc_info.into_values() {\n for key in info.keys {\n append_maybe_gc_node(&mut cypher, &info.schema, &key)?;\n }\n }\n\n writeln!(cypher.query_mut(), \"COMMIT;\")?;\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n 
}\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n let mut changes_by_conn = IndexMap::new();\n for change in changes.into_iter() {\n changes_by_conn\n .entry(change.key.connection.clone())\n .or_insert_with(Vec::new)\n .push(change);\n }\n for (conn, changes) in changes_by_conn.into_iter() {\n let conn_spec = context.auth_registry.get::(&conn)?;\n let kuzu_client = KuzuThinClient::new(&conn_spec, self.reqwest_client.clone());\n\n let (node_changes, rel_changes): (Vec<_>, Vec<_>) =\n changes.into_iter().partition(|c| match &c.key.typ {\n ElementType::Node(_) => true,\n ElementType::Relationship(_) => false,\n });\n\n let mut partial_affected_node_tables = IndexSet::new();\n let mut cypher = CypherBuilder::new();\n // Relationships first when dropping.\n for change in rel_changes.iter().chain(node_changes.iter()) {\n if !change.setup_status.actions.drop_existing {\n continue;\n }\n append_drop_table(&mut cypher, change.setup_status, &change.key.typ)?;\n\n partial_affected_node_tables.extend(\n change\n .setup_status\n .drop_affected_referenced_node_tables\n .iter(),\n );\n if let ElementType::Node(label) = &change.key.typ {\n partial_affected_node_tables.swap_remove(label);\n }\n }\n // Nodes first when creating.\n for change in node_changes.iter().chain(rel_changes.iter()) {\n append_upsert_table(&mut cypher, change.setup_status, &change.key.typ)?;\n }\n\n for table in partial_affected_node_tables {\n append_delete_orphaned_nodes(&mut cypher, table)?;\n }\n\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n }\n}\n\npub fn register(\n registry: &mut ExecutorFactoryRegistry,\n reqwest_client: reqwest::Client,\n) -> Result<()> {\n Factory { reqwest_client }.register(registry)\n}\n"], ["/cocoindex/src/ops/targets/neo4j.rs", "use crate::prelude::*;\n\nuse super::shared::property_graph::*;\n\nuse crate::setup::components::{self, State, apply_component_changes};\nuse crate::setup::{ResourceSetupStatus, 
SetupChangeType};\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nuse indoc::formatdoc;\nuse neo4rs::{BoltType, ConfigBuilder, Graph};\nuse std::fmt::Write;\nuse tokio::sync::OnceCell;\n\nconst DEFAULT_DB: &str = \"neo4j\";\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n uri: String,\n user: String,\n password: String,\n db: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\ntype Neo4jGraphElement = GraphElementType;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct GraphKey {\n uri: String,\n db: String,\n}\n\nimpl GraphKey {\n fn from_spec(spec: &ConnectionSpec) -> Self {\n Self {\n uri: spec.uri.clone(),\n db: spec.db.clone().unwrap_or_else(|| DEFAULT_DB.to_string()),\n }\n }\n}\n\nimpl retryable::IsRetryable for neo4rs::Error {\n fn is_retryable(&self) -> bool {\n match self {\n neo4rs::Error::ConnectionError => true,\n neo4rs::Error::Neo4j(e) => e.kind() == neo4rs::Neo4jErrorKind::Transient,\n _ => false,\n }\n }\n}\n\n#[derive(Default)]\npub struct GraphPool {\n graphs: Mutex>>>>,\n}\n\nimpl GraphPool {\n async fn get_graph(&self, spec: &ConnectionSpec) -> Result> {\n let graph_key = GraphKey::from_spec(spec);\n let cell = {\n let mut graphs = self.graphs.lock().unwrap();\n graphs.entry(graph_key).or_default().clone()\n };\n let graph = cell\n .get_or_try_init(|| async {\n let mut config_builder = ConfigBuilder::default()\n .uri(spec.uri.clone())\n .user(spec.user.clone())\n .password(spec.password.clone());\n if let Some(db) = &spec.db {\n config_builder = config_builder.db(db.clone());\n }\n anyhow::Ok(Arc::new(Graph::connect(config_builder.build()?).await?))\n })\n .await?;\n Ok(graph.clone())\n }\n\n async fn get_graph_for_key(\n &self,\n key: &Neo4jGraphElement,\n 
auth_registry: &AuthRegistry,\n ) -> Result> {\n let spec = auth_registry.get::(&key.connection)?;\n self.get_graph(&spec).await\n }\n}\n\npub struct ExportContext {\n connection_ref: AuthEntryReference,\n graph: Arc,\n\n create_order: u8,\n\n delete_cypher: String,\n insert_cypher: String,\n delete_before_upsert: bool,\n\n analyzed_data_coll: AnalyzedDataCollection,\n\n key_field_params: Vec,\n src_key_field_params: Vec,\n tgt_key_field_params: Vec,\n}\n\nfn json_value_to_bolt_value(value: &serde_json::Value) -> Result {\n let bolt_value = match value {\n serde_json::Value::Null => BoltType::Null(neo4rs::BoltNull),\n serde_json::Value::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n serde_json::Value::Number(v) => {\n if let Some(i) = v.as_i64() {\n BoltType::Integer(neo4rs::BoltInteger::new(i))\n } else if let Some(f) = v.as_f64() {\n BoltType::Float(neo4rs::BoltFloat::new(f))\n } else {\n anyhow::bail!(\"Unsupported JSON number: {}\", v)\n }\n }\n serde_json::Value::String(v) => BoltType::String(neo4rs::BoltString::new(v)),\n serde_json::Value::Array(v) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(json_value_to_bolt_value)\n .collect::>()?,\n }),\n serde_json::Value::Object(v) => BoltType::Map(neo4rs::BoltMap {\n value: v\n .into_iter()\n .map(|(k, v)| Ok((neo4rs::BoltString::new(k), json_value_to_bolt_value(v)?)))\n .collect::>()?,\n }),\n };\n Ok(bolt_value)\n}\n\nfn key_to_bolt(key: &KeyValue, schema: &schema::ValueType) -> Result {\n value_to_bolt(&key.into(), schema)\n}\n\nfn field_values_to_bolt<'a>(\n field_values: impl IntoIterator,\n schema: impl IntoIterator,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(schema, field_values)\n .map(|(schema, value)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(value, &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn mapped_field_values_to_bolt(\n fields_schema: 
&[schema::FieldSchema],\n fields_input_idx: &[usize],\n field_values: &FieldValues,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(fields_schema.iter(), fields_input_idx.iter())\n .map(|(schema, field_idx)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(&field_values.fields[*field_idx], &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn basic_value_to_bolt(value: &BasicValue, schema: &BasicValueType) -> Result {\n let bolt_value = match value {\n BasicValue::Bytes(v) => {\n BoltType::Bytes(neo4rs::BoltBytes::new(bytes::Bytes::from_owner(v.clone())))\n }\n BasicValue::Str(v) => BoltType::String(neo4rs::BoltString::new(v)),\n BasicValue::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n BasicValue::Int64(v) => BoltType::Integer(neo4rs::BoltInteger::new(*v)),\n BasicValue::Float64(v) => BoltType::Float(neo4rs::BoltFloat::new(*v)),\n BasicValue::Float32(v) => BoltType::Float(neo4rs::BoltFloat::new(*v as f64)),\n BasicValue::Range(v) => BoltType::List(neo4rs::BoltList {\n value: [\n BoltType::Integer(neo4rs::BoltInteger::new(v.start as i64)),\n BoltType::Integer(neo4rs::BoltInteger::new(v.end as i64)),\n ]\n .into(),\n }),\n BasicValue::Uuid(v) => BoltType::String(neo4rs::BoltString::new(&v.to_string())),\n BasicValue::Date(v) => BoltType::Date(neo4rs::BoltDate::from(*v)),\n BasicValue::Time(v) => BoltType::LocalTime(neo4rs::BoltLocalTime::from(*v)),\n BasicValue::LocalDateTime(v) => {\n BoltType::LocalDateTime(neo4rs::BoltLocalDateTime::from(*v))\n }\n BasicValue::OffsetDateTime(v) => BoltType::DateTime(neo4rs::BoltDateTime::from(*v)),\n BasicValue::TimeDelta(v) => BoltType::Duration(neo4rs::BoltDuration::new(\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger {\n value: v.num_seconds(),\n },\n v.subsec_nanos().into(),\n )),\n BasicValue::Vector(v) => match schema {\n BasicValueType::Vector(t) => 
BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| basic_value_to_bolt(v, &t.element_type))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-vector type got vector value: {}\", schema),\n },\n BasicValue::Json(v) => json_value_to_bolt_value(v)?,\n BasicValue::UnionVariant { tag_id, value } => match schema {\n BasicValueType::Union(s) => {\n let typ = s\n .types\n .get(*tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"Invalid `tag_id`: {}\", tag_id))?;\n\n basic_value_to_bolt(value, typ)?\n }\n _ => anyhow::bail!(\"Non-union type got union value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nfn value_to_bolt(value: &Value, schema: &schema::ValueType) -> Result {\n let bolt_value = match value {\n Value::Null => BoltType::Null(neo4rs::BoltNull),\n Value::Basic(v) => match schema {\n ValueType::Basic(t) => basic_value_to_bolt(v, t)?,\n _ => anyhow::bail!(\"Non-basic type got basic value: {}\", schema),\n },\n Value::Struct(v) => match schema {\n ValueType::Struct(t) => field_values_to_bolt(v.fields.iter(), t.fields.iter())?,\n _ => anyhow::bail!(\"Non-struct type got struct value: {}\", schema),\n },\n Value::UTable(v) | Value::LTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| field_values_to_bolt(v.0.fields.iter(), t.row.fields.iter()))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n Value::KTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|(k, v)| {\n field_values_to_bolt(\n std::iter::once(&Into::::into(k.clone()))\n .chain(v.0.fields.iter()),\n t.row.fields.iter(),\n )\n })\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nconst CORE_KEY_PARAM_PREFIX: &str = \"key\";\nconst CORE_PROPS_PARAM: &str = \"props\";\nconst SRC_KEY_PARAM_PREFIX: &str = \"source_key\";\nconst SRC_PROPS_PARAM: &str = 
\"source_props\";\nconst TGT_KEY_PARAM_PREFIX: &str = \"target_key\";\nconst TGT_PROPS_PARAM: &str = \"target_props\";\nconst CORE_ELEMENT_MATCHER_VAR: &str = \"e\";\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\nimpl ExportContext {\n fn build_key_field_params_n_literal<'a>(\n param_prefix: &str,\n key_fields: impl Iterator,\n ) -> (Vec, String) {\n let (params, items): (Vec, Vec) = key_fields\n .into_iter()\n .enumerate()\n .map(|(i, name)| {\n let param = format!(\"{param_prefix}_{i}\");\n let item = format!(\"{name}: ${param}\");\n (param, item)\n })\n .unzip();\n (params, format!(\"{{{}}}\", items.into_iter().join(\", \")))\n }\n\n fn new(\n graph: Arc,\n spec: Spec,\n analyzed_data_coll: AnalyzedDataCollection,\n ) -> Result {\n let (key_field_params, key_fields_literal) = Self::build_key_field_params_n_literal(\n CORE_KEY_PARAM_PREFIX,\n analyzed_data_coll.schema.key_fields.iter().map(|f| &f.name),\n );\n let result = match spec.mapping {\n GraphElementMapping::Node(node_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_node:{label} {key_fields_literal})\n WITH old_node\n SET old_node.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n WITH old_node\n WHERE NOT (old_node)--()\n DELETE old_node\n FINISH\n \",\n label = node_spec.label,\n };\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_node:{label} {key_fields_literal})\n SET new_node.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE{optional_set_props}\n FINISH\n \",\n label = node_spec.label,\n optional_set_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\", new_node += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 0,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: false,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params: vec![],\n tgt_key_field_params: vec![],\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_src)-[old_rel:{rel_type} {key_fields_literal}]->(old_tgt)\n\n DELETE old_rel\n\n WITH collect(old_src) + collect(old_tgt) AS nodes_to_check\n UNWIND nodes_to_check AS node\n WITH DISTINCT node\n WHERE NOT COALESCE(node.{SELF_CONTAINED_TAG_FIELD_NAME}, FALSE)\n AND COUNT{{ (node)--() }} = 0\n DELETE node\n\n FINISH\n \",\n rel_type = rel_spec.rel_type,\n };\n\n let analyzed_rel = analyzed_data_coll\n .rel\n .as_ref()\n .ok_or_else(invariance_violation)?;\n let analyzed_src = &analyzed_rel.source;\n let analyzed_tgt = &analyzed_rel.target;\n\n let (src_key_field_params, src_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n SRC_KEY_PARAM_PREFIX,\n analyzed_src.schema.key_fields.iter().map(|f| &f.name),\n );\n let (tgt_key_field_params, tgt_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n TGT_KEY_PARAM_PREFIX,\n analyzed_tgt.schema.key_fields.iter().map(|f| &f.name),\n );\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_src:{src_node_label} {src_key_fields_literal})\n {optional_set_src_props}\n\n MERGE (new_tgt:{tgt_node_label} {tgt_key_fields_literal})\n {optional_set_tgt_props}\n\n MERGE (new_src)-[new_rel:{rel_type} {key_fields_literal}]->(new_tgt)\n {optional_set_rel_props}\n\n FINISH\n \",\n src_node_label = rel_spec.source.label,\n optional_set_src_props = if analyzed_src.has_value_fields() {\n format!(\"SET new_src += ${SRC_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n tgt_node_label = rel_spec.target.label,\n optional_set_tgt_props = if analyzed_tgt.has_value_fields() {\n format!(\"SET new_tgt += ${TGT_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n rel_type = rel_spec.rel_type,\n optional_set_rel_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\"SET new_rel += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 1,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: true,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params,\n tgt_key_field_params,\n }\n }\n };\n Ok(result)\n }\n\n fn bind_key_field_params<'a>(\n query: neo4rs::Query,\n params: &[String],\n type_val: impl Iterator,\n ) -> Result {\n let mut query = query;\n for (i, (typ, val)) in type_val.enumerate() {\n query = query.param(¶ms[i], value_to_bolt(val, typ)?);\n }\n Ok(query)\n }\n\n fn bind_rel_key_field_params(\n &self,\n query: neo4rs::Query,\n val: &KeyValue,\n ) -> Result {\n let mut query = query;\n for (i, val) in val\n .fields_iter(self.analyzed_data_coll.schema.key_fields.len())?\n .enumerate()\n {\n query = query.param(\n &self.key_field_params[i],\n key_to_bolt(\n val,\n &self.analyzed_data_coll.schema.key_fields[i].value_type.typ,\n )?,\n );\n }\n Ok(query)\n }\n\n fn add_upsert_queries(\n &self,\n upsert: &ExportTargetUpsertEntry,\n queries: &mut Vec,\n ) -> Result<()> {\n if self.delete_before_upsert {\n queries.push(\n 
self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), &upsert.key)?,\n );\n }\n\n let value = &upsert.value;\n let mut query =\n self.bind_rel_key_field_params(neo4rs::query(&self.insert_cypher), &upsert.key)?;\n\n if let Some(analyzed_rel) = &self.analyzed_data_coll.rel {\n let bind_params = |query: neo4rs::Query,\n analyzed: &AnalyzedGraphElementFieldMapping,\n key_field_params: &[String]|\n -> Result {\n let mut query = Self::bind_key_field_params(\n query,\n key_field_params,\n std::iter::zip(\n analyzed.schema.key_fields.iter(),\n analyzed.fields_input_idx.key.iter(),\n )\n .map(|(f, field_idx)| (&f.value_type.typ, &value.fields[*field_idx])),\n )?;\n if analyzed.has_value_fields() {\n query = query.param(\n SRC_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &analyzed.schema.value_fields,\n &analyzed.fields_input_idx.value,\n value,\n )?,\n );\n }\n Ok(query)\n };\n query = bind_params(query, &analyzed_rel.source, &self.src_key_field_params)?;\n query = bind_params(query, &analyzed_rel.target, &self.tgt_key_field_params)?;\n }\n\n if !self.analyzed_data_coll.value_fields_input_idx.is_empty() {\n query = query.param(\n CORE_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &self.analyzed_data_coll.schema.value_fields,\n &self.analyzed_data_coll.value_fields_input_idx,\n value,\n )?,\n );\n }\n queries.push(query);\n Ok(())\n }\n\n fn add_delete_queries(\n &self,\n delete_key: &value::KeyValue,\n queries: &mut Vec,\n ) -> Result<()> {\n queries\n .push(self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), delete_key)?);\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SetupState {\n key_field_names: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n dependent_node_labels: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n sub_components: Vec,\n}\n\nimpl SetupState {\n fn new(\n schema: &GraphElementSchema,\n index_options: &IndexOptions,\n dependent_node_labels: Vec,\n ) -> 
Result {\n let key_field_names: Vec =\n schema.key_fields.iter().map(|f| f.name.clone()).collect();\n let mut sub_components = vec![];\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::KeyConstraint {\n field_names: key_field_names.clone(),\n },\n });\n let value_field_types = schema\n .value_fields\n .iter()\n .map(|f| (f.name.as_str(), &f.value_type.typ))\n .collect::>();\n for index_def in index_options.vector_indexes.iter() {\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::from_vector_index_def(\n index_def,\n value_field_types\n .get(index_def.field_name.as_str())\n .ok_or_else(|| {\n api_error!(\n \"Unknown field name for vector index: {}\",\n index_def.field_name\n )\n })?,\n )?,\n });\n }\n Ok(Self {\n key_field_names,\n dependent_node_labels,\n sub_components,\n })\n }\n\n fn check_compatible(&self, existing: &Self) -> SetupStateCompatibility {\n if self.key_field_names == existing.key_field_names {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n }\n }\n}\n\nimpl IntoIterator for SetupState {\n type Item = ComponentState;\n type IntoIter = std::vec::IntoIter;\n\n fn into_iter(self) -> Self::IntoIter {\n self.sub_components.into_iter()\n }\n}\n#[derive(Debug, Default)]\nstruct DataClearAction {\n dependent_node_labels: Vec,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\nenum ComponentKind {\n KeyConstraint,\n VectorIndex,\n}\n\nimpl ComponentKind {\n fn describe(&self) -> &str {\n match self {\n ComponentKind::KeyConstraint => \"KEY CONSTRAINT\",\n ComponentKind::VectorIndex => \"VECTOR INDEX\",\n }\n }\n}\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ComponentKey {\n kind: ComponentKind,\n name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nenum IndexDef {\n KeyConstraint {\n field_names: Vec,\n },\n VectorIndex {\n field_name: String,\n metric: 
spec::VectorSimilarityMetric,\n vector_size: usize,\n },\n}\n\nimpl IndexDef {\n fn from_vector_index_def(\n index_def: &spec::VectorIndexDef,\n field_typ: &schema::ValueType,\n ) -> Result {\n Ok(Self::VectorIndex {\n field_name: index_def.field_name.clone(),\n vector_size: (match field_typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(schema)) => {\n schema.dimension\n }\n _ => None,\n })\n .ok_or_else(|| {\n api_error!(\"Vector index field must be a vector with fixed dimension\")\n })?,\n metric: index_def.metric,\n })\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\npub struct ComponentState {\n object_label: ElementType,\n index_def: IndexDef,\n}\n\nimpl components::State for ComponentState {\n fn key(&self) -> ComponentKey {\n let prefix = match &self.object_label {\n ElementType::Relationship(_) => \"r\",\n ElementType::Node(_) => \"n\",\n };\n let label = self.object_label.label();\n match &self.index_def {\n IndexDef::KeyConstraint { .. } => ComponentKey {\n kind: ComponentKind::KeyConstraint,\n name: format!(\"{prefix}__{label}__key\"),\n },\n IndexDef::VectorIndex {\n field_name, metric, ..\n } => ComponentKey {\n kind: ComponentKind::VectorIndex,\n name: format!(\"{prefix}__{label}__{field_name}__{metric}__vidx\"),\n },\n }\n }\n}\n\npub struct SetupComponentOperator {\n graph_pool: Arc,\n conn_spec: ConnectionSpec,\n}\n\n#[async_trait]\nimpl components::SetupOperator for SetupComponentOperator {\n type Key = ComponentKey;\n type State = ComponentState;\n type SetupState = SetupState;\n type Context = ();\n\n fn describe_key(&self, key: &Self::Key) -> String {\n format!(\"{} {}\", key.kind.describe(), key.name)\n }\n\n fn describe_state(&self, state: &Self::State) -> String {\n let key_desc = self.describe_key(&state.key());\n let label = state.object_label.label();\n match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n format!(\"{key_desc} ON {label} (key: {})\", field_names.join(\", \"))\n 
}\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n format!(\n \"{key_desc} ON {label} (field_name: {field_name}, vector_size: {vector_size}, metric: {metric})\",\n )\n }\n }\n }\n\n fn is_up_to_date(&self, current: &ComponentState, desired: &ComponentState) -> bool {\n current == desired\n }\n\n async fn create(&self, state: &ComponentState, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let key = state.key();\n let qualifier = CORE_ELEMENT_MATCHER_VAR;\n let matcher = state.object_label.matcher(qualifier);\n let query = neo4rs::query(&match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n let key_type = match &state.object_label {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"RELATIONSHIP\",\n };\n format!(\n \"CREATE CONSTRAINT {name} IF NOT EXISTS FOR {matcher} REQUIRE {field_names} IS {key_type} KEY\",\n name = key.name,\n field_names = build_composite_field_names(qualifier, field_names),\n )\n }\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n formatdoc! 
{\"\n CREATE VECTOR INDEX {name} IF NOT EXISTS\n FOR {matcher} ON {qualifier}.{field_name}\n OPTIONS {{\n indexConfig: {{\n `vector.dimensions`: {vector_size},\n `vector.similarity_function`: '{metric}'\n }}\n }}\",\n name = key.name,\n }\n }\n });\n Ok(graph.run(query).await?)\n }\n\n async fn delete(&self, key: &ComponentKey, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let query = neo4rs::query(&format!(\n \"DROP {kind} {name} IF EXISTS\",\n kind = match key.kind {\n ComponentKind::KeyConstraint => \"CONSTRAINT\",\n ComponentKind::VectorIndex => \"INDEX\",\n },\n name = key.name,\n ));\n Ok(graph.run(query).await?)\n }\n}\n\nfn build_composite_field_names(qualifier: &str, field_names: &[String]) -> String {\n let strs = field_names\n .iter()\n .map(|name| format!(\"{qualifier}.{name}\"))\n .join(\", \");\n if field_names.len() == 1 {\n strs\n } else {\n format!(\"({strs})\")\n }\n}\n#[derive(Debug)]\npub struct GraphElementDataSetupStatus {\n data_clear: Option,\n change_type: SetupChangeType,\n}\n\nimpl GraphElementDataSetupStatus {\n fn new(desired_state: Option<&SetupState>, existing: &CombinedState) -> Self {\n let mut data_clear: Option = None;\n for v in existing.possible_versions() {\n if desired_state.as_ref().is_none_or(|desired| {\n desired.check_compatible(v) == SetupStateCompatibility::NotCompatible\n }) {\n data_clear\n .get_or_insert_default()\n .dependent_node_labels\n .extend(v.dependent_node_labels.iter().cloned());\n }\n }\n\n let change_type = match (desired_state, existing.possible_versions().next()) {\n (Some(_), Some(_)) => {\n if data_clear.is_none() {\n SetupChangeType::NoChange\n } else {\n SetupChangeType::Update\n }\n }\n (Some(_), None) => SetupChangeType::Create,\n (None, Some(_)) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n };\n\n Self {\n data_clear,\n change_type,\n }\n }\n}\n\nimpl ResourceSetupStatus for GraphElementDataSetupStatus {\n 
fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if let Some(data_clear) = &self.data_clear {\n let mut desc = \"Clear data\".to_string();\n if !data_clear.dependent_node_labels.is_empty() {\n write!(\n &mut desc,\n \"; dependents {}\",\n data_clear\n .dependent_node_labels\n .iter()\n .map(|l| format!(\"{}\", ElementType::Node(l.clone())))\n .join(\", \")\n )\n .unwrap();\n }\n result.push(setup::ChangeDescription::Action(desc));\n }\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.change_type\n }\n}\n\nasync fn clear_graph_element_data(\n graph: &Graph,\n key: &Neo4jGraphElement,\n is_self_contained: bool,\n) -> Result<()> {\n let var_name = CORE_ELEMENT_MATCHER_VAR;\n let matcher = key.typ.matcher(var_name);\n let query_string = match key.typ {\n ElementType::Node(_) => {\n let optional_reset_self_contained = if is_self_contained {\n formatdoc! {\"\n WITH {var_name}\n SET {var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n \"}\n } else {\n \"\".to_string()\n };\n formatdoc! {\"\n CALL {{\n MATCH {matcher}\n {optional_reset_self_contained}\n WITH {var_name} WHERE NOT ({var_name})--() DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n ElementType::Relationship(_) => {\n formatdoc! 
{\"\n CALL {{\n MATCH {matcher} WITH {var_name} DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n };\n let delete_query = neo4rs::query(&query_string);\n graph.run(delete_query).await?;\n Ok(())\n}\n\n/// Factory for Neo4j relationships\npub struct Factory {\n graph_pool: Arc,\n}\n\nimpl Factory {\n pub fn new() -> Self {\n Self {\n graph_pool: Arc::default(),\n }\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = (\n GraphElementDataSetupStatus,\n components::SetupStatus,\n );\n type Key = Neo4jGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Neo4j\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Neo4jGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n let data_coll_output = std::iter::zip(data_collections, analyzed_data_colls)\n .map(|(data_coll, analyzed)| {\n let setup_key = Neo4jGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState::new(\n &analyzed.schema,\n &data_coll.index_options,\n analyzed\n .dependent_node_labels()\n .into_iter()\n .map(|s| s.to_string())\n .collect(),\n )?;\n\n let conn_spec = context\n .auth_registry\n .get::(&data_coll.spec.connection)?;\n let factory = self.clone();\n let export_context = async move {\n Ok(Arc::new(ExportContext::new(\n factory.graph_pool.get_graph(&conn_spec).await?,\n 
data_coll.spec,\n analyzed,\n )?))\n }\n .boxed();\n\n Ok(TypedExportDataCollectionBuildOutput {\n export_context,\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>>()?;\n let decl_output = std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state =\n SetupState::new(&graph_elem_schema, &decl.decl.index_options, vec![])?;\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: Neo4jGraphElement,\n desired: Option,\n existing: CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result {\n let conn_spec = flow_instance_ctx\n .auth_registry\n .get::(&key.connection)?;\n let data_status = GraphElementDataSetupStatus::new(desired.as_ref(), &existing);\n let components = components::SetupStatus::create(\n SetupComponentOperator {\n graph_pool: self.graph_pool.clone(),\n conn_spec,\n },\n desired,\n existing,\n )?;\n Ok((data_status, components))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(desired.check_compatible(existing))\n }\n\n fn describe_resource(&self, key: &Neo4jGraphElement) -> Result {\n Ok(format!(\"Neo4j {}\", key.typ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut muts_by_graph = HashMap::new();\n for mut_with_ctx in mutations.iter() {\n muts_by_graph\n .entry(&mut_with_ctx.export_context.connection_ref)\n .or_insert_with(Vec::new)\n .push(mut_with_ctx);\n }\n let retry_options = retryable::RetryOptions::default();\n for muts in muts_by_graph.values_mut() {\n muts.sort_by_key(|m| m.export_context.create_order);\n let graph = &muts[0].export_context.graph;\n retryable::run(\n async || {\n let mut queries = vec![];\n for mut_with_ctx in muts.iter() {\n let export_ctx = 
&mut_with_ctx.export_context;\n for upsert in mut_with_ctx.mutation.upserts.iter() {\n export_ctx.add_upsert_queries(upsert, &mut queries)?;\n }\n }\n for mut_with_ctx in muts.iter().rev() {\n let export_ctx = &mut_with_ctx.export_context;\n for deletion in mut_with_ctx.mutation.deletes.iter() {\n export_ctx.add_delete_queries(&deletion.key, &mut queries)?;\n }\n }\n let mut txn = graph.start_txn().await?;\n txn.run_queries(queries).await?;\n txn.commit().await?;\n retryable::Ok(())\n },\n &retry_options,\n )\n .await\n .map_err(Into::::into)?\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Relationships first, then nodes, as relationships need to be deleted before nodes they referenced.\n let mut relationship_types = IndexSet::<&Neo4jGraphElement>::new();\n let mut node_labels = IndexSet::<&Neo4jGraphElement>::new();\n let mut dependent_node_labels = IndexSet::::new();\n\n let mut components = vec![];\n for change in changes.iter() {\n if let Some(data_clear) = &change.setup_status.0.data_clear {\n match &change.key.typ {\n ElementType::Relationship(_) => {\n relationship_types.insert(&change.key);\n for label in &data_clear.dependent_node_labels {\n dependent_node_labels.insert(Neo4jGraphElement {\n connection: change.key.connection.clone(),\n typ: ElementType::Node(label.clone()),\n });\n }\n }\n ElementType::Node(_) => {\n node_labels.insert(&change.key);\n }\n }\n }\n components.push(&change.setup_status.1);\n }\n\n // Relationships have no dependency, so can be cleared first.\n for rel_type in relationship_types.into_iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(rel_type, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, rel_type, true).await?;\n }\n // Clear standalone nodes, which is simpler than dependent nodes.\n for node_label in node_labels.iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n 
.await?;\n clear_graph_element_data(&graph, node_label, true).await?;\n }\n // Clear dependent nodes if they're not covered by standalone nodes.\n for node_label in dependent_node_labels.iter() {\n if !node_labels.contains(node_label) {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, node_label, false).await?;\n }\n }\n\n apply_component_changes(components, &()).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/postgres.rs", "use crate::prelude::*;\n\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::base::spec::{self, *};\nuse crate::ops::sdk::*;\nuse crate::settings::DatabaseConnectionSpec;\nuse async_trait::async_trait;\nuse indexmap::{IndexMap, IndexSet};\nuse itertools::Itertools;\nuse serde::Serialize;\nuse sqlx::PgPool;\nuse sqlx::postgres::types::PgRange;\nuse std::ops::Bound;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n database: Option>,\n table_name: Option,\n}\nconst BIND_LIMIT: usize = 65535;\n\nfn key_value_fields_iter<'a>(\n key_fields_schema: &[FieldSchema],\n key_value: &'a KeyValue,\n) -> Result<&'a [KeyValue]> {\n let slice = if key_fields_schema.len() == 1 {\n std::slice::from_ref(key_value)\n } else {\n match key_value {\n KeyValue::Struct(fields) => fields,\n _ => bail!(\"expect struct key value\"),\n }\n };\n Ok(slice)\n}\n\nfn convertible_to_pgvector(vec_schema: &VectorTypeSchema) -> bool {\n if vec_schema.dimension.is_some() {\n matches!(\n *vec_schema.element_type,\n BasicValueType::Float32 | BasicValueType::Float64 | BasicValueType::Int64\n )\n } else {\n false\n }\n}\n\nfn bind_key_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n key_value: &'arg KeyValue,\n) -> Result<()> {\n match key_value {\n KeyValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Str(v) => {\n builder.push_bind(&**v);\n }\n 
KeyValue::Bool(v) => {\n builder.push_bind(v);\n }\n KeyValue::Int64(v) => {\n builder.push_bind(v);\n }\n KeyValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n KeyValue::Uuid(v) => {\n builder.push_bind(v);\n }\n KeyValue::Date(v) => {\n builder.push_bind(v);\n }\n KeyValue::Struct(fields) => {\n builder.push_bind(sqlx::types::Json(fields));\n }\n }\n Ok(())\n}\n\nfn bind_value_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n field_schema: &'arg FieldSchema,\n value: &'arg Value,\n) -> Result<()> {\n match &value {\n Value::Basic(v) => match v {\n BasicValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Str(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Bool(v) => {\n builder.push_bind(v);\n }\n BasicValue::Int64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float32(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n BasicValue::Uuid(v) => {\n builder.push_bind(v);\n }\n BasicValue::Date(v) => {\n builder.push_bind(v);\n }\n BasicValue::Time(v) => {\n builder.push_bind(v);\n }\n BasicValue::LocalDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::OffsetDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::TimeDelta(v) => {\n builder.push_bind(v);\n }\n BasicValue::Json(v) => {\n builder.push_bind(sqlx::types::Json(&**v));\n }\n BasicValue::Vector(v) => match &field_schema.value_type.typ {\n ValueType::Basic(BasicValueType::Vector(vs)) if convertible_to_pgvector(vs) => {\n let vec = v\n .iter()\n .map(|v| {\n Ok(match v {\n BasicValue::Float32(v) => *v,\n BasicValue::Float64(v) => *v as f32,\n BasicValue::Int64(v) => *v as f32,\n v => bail!(\"unexpected vector element type: {}\", v.kind()),\n })\n })\n .collect::>>()?;\n 
builder.push_bind(pgvector::Vector::from(vec));\n }\n _ => {\n builder.push_bind(sqlx::types::Json(v));\n }\n },\n BasicValue::UnionVariant { .. } => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v: value,\n }));\n }\n },\n Value::Null => {\n builder.push(\"NULL\");\n }\n v => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v,\n }));\n }\n };\n Ok(())\n}\n\npub struct ExportContext {\n db_ref: Option>,\n db_pool: PgPool,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n upsert_sql_prefix: String,\n upsert_sql_suffix: String,\n delete_sql_prefix: String,\n}\n\nimpl ExportContext {\n fn new(\n db_ref: Option>,\n db_pool: PgPool,\n table_name: String,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n ) -> Result {\n let key_fields = key_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let all_fields = (key_fields_schema.iter().chain(value_fields_schema.iter()))\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let set_value_fields = value_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\" = EXCLUDED.\\\"{}\\\"\", f.name, f.name))\n .collect::>()\n .join(\", \");\n\n Ok(Self {\n db_ref,\n db_pool,\n upsert_sql_prefix: format!(\"INSERT INTO {table_name} ({all_fields}) VALUES \"),\n upsert_sql_suffix: if value_fields_schema.is_empty() {\n format!(\" ON CONFLICT ({key_fields}) DO NOTHING;\")\n } else {\n format!(\" ON CONFLICT ({key_fields}) DO UPDATE SET {set_value_fields};\")\n },\n delete_sql_prefix: format!(\"DELETE FROM {table_name} WHERE \"),\n key_fields_schema,\n value_fields_schema,\n })\n }\n}\n\nimpl ExportContext {\n async fn upsert(\n &self,\n upserts: &[interface::ExportTargetUpsertEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n let num_parameters = self.key_fields_schema.len() + self.value_fields_schema.len();\n for upsert_chunk in 
upserts.chunks(BIND_LIMIT / num_parameters) {\n let mut query_builder = sqlx::QueryBuilder::new(&self.upsert_sql_prefix);\n for (i, upsert) in upsert_chunk.iter().enumerate() {\n if i > 0 {\n query_builder.push(\",\");\n }\n query_builder.push(\" (\");\n for (j, key_value) in key_value_fields_iter(&self.key_fields_schema, &upsert.key)?\n .iter()\n .enumerate()\n {\n if j > 0 {\n query_builder.push(\", \");\n }\n bind_key_field(&mut query_builder, key_value)?;\n }\n if self.value_fields_schema.len() != upsert.value.fields.len() {\n bail!(\n \"unmatched value length: {} vs {}\",\n self.value_fields_schema.len(),\n upsert.value.fields.len()\n );\n }\n for (schema, value) in self\n .value_fields_schema\n .iter()\n .zip(upsert.value.fields.iter())\n {\n query_builder.push(\", \");\n bind_value_field(&mut query_builder, schema, value)?;\n }\n query_builder.push(\")\");\n }\n query_builder.push(&self.upsert_sql_suffix);\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n\n async fn delete(\n &self,\n deletions: &[interface::ExportTargetDeleteEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n // TODO: Find a way to batch delete.\n for deletion in deletions.iter() {\n let mut query_builder = sqlx::QueryBuilder::new(\"\");\n query_builder.push(&self.delete_sql_prefix);\n for (i, (schema, value)) in self\n .key_fields_schema\n .iter()\n .zip(key_value_fields_iter(&self.key_fields_schema, &deletion.key)?.iter())\n .enumerate()\n {\n if i > 0 {\n query_builder.push(\" AND \");\n }\n query_builder.push(\"\\\"\");\n query_builder.push(schema.name.as_str());\n query_builder.push(\"\\\"\");\n query_builder.push(\"=\");\n bind_key_field(&mut query_builder, value)?;\n }\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n}\n\n#[derive(Default)]\npub struct Factory {}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TableId {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n database: 
Option>,\n table_name: String,\n}\n\nimpl std::fmt::Display for TableId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.table_name)?;\n if let Some(database) = &self.database {\n write!(f, \" (database: {database})\")?;\n }\n Ok(())\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SetupState {\n #[serde(flatten)]\n columns: TableColumnsSchema,\n\n vector_indexes: BTreeMap,\n}\n\nimpl SetupState {\n fn new(\n table_id: &TableId,\n key_fields_schema: &[FieldSchema],\n value_fields_schema: &[FieldSchema],\n index_options: &IndexOptions,\n ) -> Self {\n Self {\n columns: TableColumnsSchema {\n key_columns: key_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n value_columns: value_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n },\n vector_indexes: index_options\n .vector_indexes\n .iter()\n .map(|v| (to_vector_index_name(&table_id.table_name, v), v.clone()))\n .collect(),\n }\n }\n\n fn uses_pgvector(&self) -> bool {\n self.columns\n .value_columns\n .iter()\n .any(|(_, value)| match &value {\n ValueType::Basic(BasicValueType::Vector(vec_schema)) => {\n convertible_to_pgvector(vec_schema)\n }\n _ => false,\n })\n }\n}\n\nfn to_column_type_sql(column_type: &ValueType) -> String {\n match column_type {\n ValueType::Basic(basic_type) => match basic_type {\n BasicValueType::Bytes => \"bytea\".into(),\n BasicValueType::Str => \"text\".into(),\n BasicValueType::Bool => \"boolean\".into(),\n BasicValueType::Int64 => \"bigint\".into(),\n BasicValueType::Float32 => \"real\".into(),\n BasicValueType::Float64 => \"double precision\".into(),\n BasicValueType::Range => \"int8range\".into(),\n BasicValueType::Uuid => \"uuid\".into(),\n BasicValueType::Date => \"date\".into(),\n BasicValueType::Time => \"time\".into(),\n BasicValueType::LocalDateTime => \"timestamp\".into(),\n BasicValueType::OffsetDateTime 
=> \"timestamp with time zone\".into(),\n BasicValueType::TimeDelta => \"interval\".into(),\n BasicValueType::Json => \"jsonb\".into(),\n BasicValueType::Vector(vec_schema) => {\n if convertible_to_pgvector(vec_schema) {\n format!(\"vector({})\", vec_schema.dimension.unwrap_or(0))\n } else {\n \"jsonb\".into()\n }\n }\n BasicValueType::Union(_) => \"jsonb\".into(),\n },\n _ => \"jsonb\".into(),\n }\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Owned(TableColumnsSchema {\n key_columns: val\n .columns\n .key_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n value_columns: val\n .columns\n .value_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n })\n }\n}\n\n#[derive(Debug)]\npub struct TableSetupAction {\n table_action: TableMainSetupAction,\n indexes_to_delete: IndexSet,\n indexes_to_create: IndexMap,\n}\n\n#[derive(Debug)]\npub struct SetupStatus {\n create_pgvector_extension: bool,\n actions: TableSetupAction,\n vector_as_jsonb_columns: Vec<(String, ValueType)>,\n}\n\nimpl SetupStatus {\n fn new(desired_state: Option, existing: setup::CombinedState) -> Self {\n let table_action =\n TableMainSetupAction::from_states(desired_state.as_ref(), &existing, false);\n let vector_as_jsonb_columns = desired_state\n .as_ref()\n .iter()\n .flat_map(|s| {\n s.columns.value_columns.iter().filter_map(|(name, schema)| {\n if let ValueType::Basic(BasicValueType::Vector(vec_schema)) = schema\n && !convertible_to_pgvector(vec_schema)\n {\n let is_touched = match &table_action.table_upsertion {\n Some(TableUpsertionAction::Create { values, .. 
}) => {\n values.contains_key(name)\n }\n Some(TableUpsertionAction::Update {\n columns_to_upsert, ..\n }) => columns_to_upsert.contains_key(name),\n None => false,\n };\n if is_touched {\n Some((name.clone(), schema.clone()))\n } else {\n None\n }\n } else {\n None\n }\n })\n })\n .collect::>();\n let (indexes_to_delete, indexes_to_create) = desired_state\n .as_ref()\n .map(|desired| {\n (\n existing\n .possible_versions()\n .flat_map(|v| v.vector_indexes.keys())\n .filter(|index_name| !desired.vector_indexes.contains_key(*index_name))\n .cloned()\n .collect::>(),\n desired\n .vector_indexes\n .iter()\n .filter(|(name, def)| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vector_indexes.get(*name) != Some(def))\n })\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect::>(),\n )\n })\n .unwrap_or_default();\n let create_pgvector_extension = desired_state\n .as_ref()\n .map(|s| s.uses_pgvector())\n .unwrap_or(false)\n && !existing.current.map(|s| s.uses_pgvector()).unwrap_or(false);\n\n Self {\n create_pgvector_extension,\n actions: TableSetupAction {\n table_action,\n indexes_to_delete,\n indexes_to_create,\n },\n vector_as_jsonb_columns,\n }\n }\n}\n\nfn to_vector_similarity_metric_sql(metric: VectorSimilarityMetric) -> &'static str {\n match metric {\n VectorSimilarityMetric::CosineSimilarity => \"vector_cosine_ops\",\n VectorSimilarityMetric::L2Distance => \"vector_l2_ops\",\n VectorSimilarityMetric::InnerProduct => \"vector_ip_ops\",\n }\n}\n\nfn to_index_spec_sql(index_spec: &VectorIndexDef) -> Cow<'static, str> {\n format!(\n \"USING hnsw ({} {})\",\n index_spec.field_name,\n to_vector_similarity_metric_sql(index_spec.metric)\n )\n .into()\n}\n\nfn to_vector_index_name(table_name: &str, vector_index_def: &spec::VectorIndexDef) -> String {\n format!(\n \"{}__{}__{}\",\n table_name,\n vector_index_def.field_name,\n to_vector_similarity_metric_sql(vector_index_def.metric)\n )\n}\n\nfn describe_index_spec(index_name: &str, 
index_spec: &VectorIndexDef) -> String {\n format!(\"{} {}\", index_name, to_index_spec_sql(index_spec))\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut descriptions = self.actions.table_action.describe_changes();\n for (column_name, schema) in self.vector_as_jsonb_columns.iter() {\n descriptions.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by pgvector. It will be stored as `jsonb`.\",\n column_name,\n schema\n )));\n }\n if self.create_pgvector_extension {\n descriptions.push(setup::ChangeDescription::Action(\n \"Create pg_vector extension (if not exists)\".to_string(),\n ));\n }\n if !self.actions.indexes_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete indexes from table: {}\",\n self.actions.indexes_to_delete.iter().join(\", \"),\n )));\n }\n if !self.actions.indexes_to_create.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create indexes in table: {}\",\n self.actions\n .indexes_to_create\n .iter()\n .map(|(index_name, index_spec)| describe_index_spec(index_name, index_spec))\n .join(\", \"),\n )));\n }\n descriptions\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n let has_other_update = !self.actions.indexes_to_create.is_empty()\n || !self.actions.indexes_to_delete.is_empty();\n self.actions.table_action.change_type(has_other_update)\n }\n}\n\nimpl SetupStatus {\n async fn apply_change(&self, db_pool: &PgPool, table_name: &str) -> Result<()> {\n if self.actions.table_action.drop_existing {\n sqlx::query(&format!(\"DROP TABLE IF EXISTS {table_name}\"))\n .execute(db_pool)\n .await?;\n }\n if self.create_pgvector_extension {\n sqlx::query(\"CREATE EXTENSION IF NOT EXISTS vector;\")\n .execute(db_pool)\n .await?;\n }\n for index_name in self.actions.indexes_to_delete.iter() {\n let sql = format!(\"DROP INDEX IF EXISTS {index_name}\");\n 
sqlx::query(&sql).execute(db_pool).await?;\n }\n if let Some(table_upsertion) = &self.actions.table_action.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n let mut fields = (keys\n .iter()\n .map(|(name, typ)| format!(\"\\\"{name}\\\" {typ} NOT NULL\")))\n .chain(values.iter().map(|(name, typ)| format!(\"\\\"{name}\\\" {typ}\")));\n let sql = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} ({}, PRIMARY KEY ({}))\",\n fields.join(\", \"),\n keys.keys().join(\", \")\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n for column_name in columns_to_delete.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\"\",\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n for (column_name, column_type) in columns_to_upsert.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\", ADD COLUMN \\\"{column_name}\\\" {column_type}\"\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n }\n }\n }\n for (index_name, index_spec) in self.actions.indexes_to_create.iter() {\n let sql = format!(\n \"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} {}\",\n to_index_spec_sql(index_spec)\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n Ok(())\n }\n}\n\nasync fn get_db_pool(\n db_ref: Option<&spec::AuthEntryReference>,\n auth_registry: &AuthRegistry,\n) -> Result {\n let lib_context = get_lib_context()?;\n let db_conn_spec = db_ref\n .as_ref()\n .map(|db_ref| auth_registry.get(db_ref))\n .transpose()?;\n let db_pool = match db_conn_spec {\n Some(db_conn_spec) => lib_context.db_pools.get_pool(&db_conn_spec).await?,\n None => lib_context.require_builtin_db_pool()?.clone(),\n };\n Ok(db_pool)\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type 
SetupStatus = SetupStatus;\n type Key = TableId;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Postgres\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(TableId, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n let table_id = TableId {\n database: d.spec.database.clone(),\n table_name: d.spec.table_name.unwrap_or_else(|| {\n utils::db::sanitize_identifier(&format!(\n \"{}__{}\",\n context.flow_instance_name, d.name\n ))\n }),\n };\n let setup_state = SetupState::new(\n &table_id,\n &d.key_fields_schema,\n &d.value_fields_schema,\n &d.index_options,\n );\n let table_name = table_id.table_name.clone();\n let db_ref = d.spec.database;\n let auth_registry = context.auth_registry.clone();\n let export_context = Box::pin(async move {\n let db_pool = get_db_pool(db_ref.as_ref(), &auth_registry).await?;\n let export_context = Arc::new(ExportContext::new(\n db_ref,\n db_pool.clone(),\n table_name,\n d.key_fields_schema,\n d.value_fields_schema,\n )?);\n Ok(export_context)\n });\n Ok(TypedExportDataCollectionBuildOutput {\n setup_key: table_id,\n desired_setup_state: setup_state,\n export_context,\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: TableId,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n Ok(SetupStatus::new(desired, existing))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(check_table_compatibility(\n &desired.columns,\n &existing.columns,\n ))\n }\n\n fn describe_resource(&self, key: &TableId) -> Result {\n Ok(format!(\"Postgres table {}\", key.table_name))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mut_groups_by_db_ref = HashMap::new();\n for mutation in mutations.iter() 
{\n mut_groups_by_db_ref\n .entry(mutation.export_context.db_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mut_groups in mut_groups_by_db_ref.values() {\n let db_pool = &mut_groups\n .first()\n .ok_or_else(|| anyhow!(\"empty group\"))?\n .export_context\n .db_pool;\n let mut txn = db_pool.begin().await?;\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .upsert(&mut_group.mutation.upserts, &mut txn)\n .await?;\n }\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .delete(&mut_group.mutation.deletes, &mut txn)\n .await?;\n }\n txn.commit().await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n for change in changes.iter() {\n let db_pool = get_db_pool(change.key.database.as_ref(), &context.auth_registry).await?;\n change\n .setup_status\n .apply_change(&db_pool, &change.key.table_name)\n .await?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/row_indexer.rs", "use crate::prelude::*;\n\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, HashSet};\n\nuse super::db_tracking::{self, TrackedTargetKeyInfo, read_source_tracking_info_for_processing};\nuse super::db_tracking_setup;\nuse super::evaluator::{\n EvaluateSourceEntryOutput, SourceRowEvaluationContext, evaluate_source_entry,\n};\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, StoredMemoizationInfo};\nuse super::stats;\n\nuse crate::base::value::{self, FieldValues, KeyValue};\nuse crate::builder::plan::*;\nuse crate::ops::interface::{\n ExportTargetMutation, ExportTargetUpsertEntry, Ordinal, SourceExecutorGetOptions,\n};\nuse crate::utils::db::WriteAction;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\npub fn extract_primary_key(\n primary_key_def: &AnalyzedPrimaryKeyDef,\n record: &FieldValues,\n) -> Result {\n match primary_key_def {\n AnalyzedPrimaryKeyDef::Fields(fields) => {\n 
KeyValue::from_values(fields.iter().map(|field| &record.fields[*field]))\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]\npub enum SourceVersionKind {\n #[default]\n UnknownLogic,\n DifferentLogic,\n CurrentLogic,\n NonExistence,\n}\n\n#[derive(Debug, Clone, Default)]\npub struct SourceVersion {\n pub ordinal: Ordinal,\n pub kind: SourceVersionKind,\n}\n\nimpl SourceVersion {\n pub fn from_stored(\n stored_ordinal: Option,\n stored_fp: &Option>,\n curr_fp: Fingerprint,\n ) -> Self {\n Self {\n ordinal: Ordinal(stored_ordinal),\n kind: match &stored_fp {\n Some(stored_fp) => {\n if stored_fp.as_slice() == curr_fp.0.as_slice() {\n SourceVersionKind::CurrentLogic\n } else {\n SourceVersionKind::DifferentLogic\n }\n }\n None => SourceVersionKind::UnknownLogic,\n },\n }\n }\n\n pub fn from_stored_processing_info(\n info: &db_tracking::SourceTrackingInfoForProcessing,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_stored_precommit_info(\n info: &db_tracking::SourceTrackingInfoForPrecommit,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_current_with_ordinal(ordinal: Ordinal) -> Self {\n Self {\n ordinal,\n kind: SourceVersionKind::CurrentLogic,\n }\n }\n\n pub fn from_current_data(data: &interface::SourceData) -> Self {\n let kind = match &data.value {\n interface::SourceValue::Existence(_) => SourceVersionKind::CurrentLogic,\n interface::SourceValue::NonExistence => SourceVersionKind::NonExistence,\n };\n Self {\n ordinal: data.ordinal,\n kind,\n }\n }\n\n pub fn should_skip(\n &self,\n target: &SourceVersion,\n update_stats: Option<&stats::UpdateStats>,\n ) -> bool {\n // Ordinal indicates monotonic invariance - always respect ordinal order\n // Never process older ordinals to maintain consistency\n let 
should_skip = match (self.ordinal.0, target.ordinal.0) {\n (Some(existing_ordinal), Some(target_ordinal)) => {\n // Skip if target ordinal is older, or same ordinal with same/older logic version\n existing_ordinal > target_ordinal\n || (existing_ordinal == target_ordinal && self.kind >= target.kind)\n }\n _ => false,\n };\n if should_skip {\n if let Some(update_stats) = update_stats {\n update_stats.num_no_change.inc(1);\n }\n }\n should_skip\n }\n}\n\npub enum SkippedOr {\n Normal(T),\n Skipped(SourceVersion),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\nstruct TargetKeyPair {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Default)]\nstruct TrackingInfoForTarget<'a> {\n export_op: Option<&'a AnalyzedExportOp>,\n\n // Existing keys info. Keyed by target key.\n // Will be removed after new rows for the same key are added into `new_staging_keys_info` and `mutation.upserts`,\n // hence all remaining ones are to be deleted.\n existing_staging_keys_info: HashMap)>>,\n existing_keys_info: HashMap)>>,\n\n // New keys info for staging.\n new_staging_keys_info: Vec,\n\n // Mutation to apply to the target storage.\n mutation: ExportTargetMutation,\n}\n\n#[derive(Debug)]\nstruct PrecommitData<'a> {\n evaluate_output: &'a EvaluateSourceEntryOutput,\n memoization_info: &'a StoredMemoizationInfo,\n}\nstruct PrecommitMetadata {\n source_entry_exists: bool,\n process_ordinal: i64,\n existing_process_ordinal: Option,\n new_target_keys: db_tracking::TrackedTargetKeyForSource,\n}\nstruct PrecommitOutput {\n metadata: PrecommitMetadata,\n target_mutations: HashMap,\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fp: Fingerprint,\n data: Option>,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n export_ops: &[AnalyzedExportOp],\n export_ops_exec_ctx: 
&[exec_ctx::ExportOpExecutionContext],\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n if let Some(tracking_info) = &tracking_info {\n let existing_source_version =\n SourceVersion::from_stored_precommit_info(tracking_info, logic_fp);\n if existing_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_source_version));\n }\n }\n let tracking_info_exists = tracking_info.is_some();\n let process_ordinal = (tracking_info\n .as_ref()\n .map(|info| info.max_process_ordinal)\n .unwrap_or(0)\n + 1)\n .max(process_timestamp.timestamp_millis());\n let existing_process_ordinal = tracking_info.as_ref().and_then(|info| info.process_ordinal);\n\n let mut tracking_info_for_targets = HashMap::::new();\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default()\n .export_op = Some(export_op);\n }\n\n // Collect `tracking_info_for_targets` from existing tracking info.\n if let Some(info) = tracking_info {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n for (target_id, keys_info) in staging_target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_staging_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n\n if let Some(sqlx::types::Json(target_keys)) = info.target_keys {\n for (target_id, keys_info) in target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in 
keys_info.into_iter() {\n target_info\n .existing_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n }\n }\n\n let mut new_target_keys_info = db_tracking::TrackedTargetKeyForSource::default();\n if let Some(data) = &data {\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n let target_info = tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default();\n let mut keys_info = Vec::new();\n let collected_values =\n &data.evaluate_output.collected_values[export_op.input.collector_idx as usize];\n for value in collected_values.iter() {\n let primary_key = extract_primary_key(&export_op.primary_key_def, value)?;\n let primary_key_json = serde_json::to_value(&primary_key)?;\n\n let mut field_values = FieldValues {\n fields: Vec::with_capacity(export_op.value_fields.len()),\n };\n for field in export_op.value_fields.iter() {\n field_values\n .fields\n .push(value.fields[*field as usize].clone());\n }\n let additional_key = export_op.export_target_factory.extract_additional_key(\n &primary_key,\n &field_values,\n export_op.export_context.as_ref(),\n )?;\n let target_key_pair = TargetKeyPair {\n key: primary_key_json,\n additional_key,\n };\n let existing_target_keys = target_info.existing_keys_info.remove(&target_key_pair);\n let existing_staging_target_keys = target_info\n .existing_staging_keys_info\n .remove(&target_key_pair);\n\n let curr_fp = if !export_op.value_stable {\n Some(\n Fingerprinter::default()\n .with(&field_values)?\n .into_fingerprint(),\n )\n } else {\n None\n };\n if existing_target_keys\n .as_ref()\n .map(|keys| !keys.is_empty() && keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(false)\n && existing_staging_target_keys\n .map(|keys| keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(true)\n {\n // Already exists, with exactly 
the same value fingerprint.\n // Nothing need to be changed, except carrying over the existing target keys info.\n let (existing_ordinal, existing_fp) = existing_target_keys\n .ok_or_else(invariance_violation)?\n .into_iter()\n .next()\n .ok_or_else(invariance_violation)?;\n keys_info.push(TrackedTargetKeyInfo {\n key: target_key_pair.key,\n additional_key: target_key_pair.additional_key,\n process_ordinal: existing_ordinal,\n fingerprint: existing_fp,\n });\n } else {\n // Entry with new value. Needs to be upserted.\n let tracked_target_key = TrackedTargetKeyInfo {\n key: target_key_pair.key.clone(),\n additional_key: target_key_pair.additional_key.clone(),\n process_ordinal,\n fingerprint: curr_fp,\n };\n target_info.mutation.upserts.push(ExportTargetUpsertEntry {\n key: primary_key,\n additional_key: target_key_pair.additional_key,\n value: field_values,\n });\n target_info\n .new_staging_keys_info\n .push(tracked_target_key.clone());\n keys_info.push(tracked_target_key);\n }\n }\n new_target_keys_info.push((export_op_exec_ctx.target_id, keys_info));\n }\n }\n\n let mut new_staging_target_keys = db_tracking::TrackedTargetKeyForSource::default();\n let mut target_mutations = HashMap::with_capacity(export_ops.len());\n for (target_id, target_tracking_info) in tracking_info_for_targets.into_iter() {\n let legacy_keys: HashSet = target_tracking_info\n .existing_keys_info\n .into_keys()\n .chain(target_tracking_info.existing_staging_keys_info.into_keys())\n .collect();\n\n let mut new_staging_keys_info = target_tracking_info.new_staging_keys_info;\n // Add tracking info for deletions.\n new_staging_keys_info.extend(legacy_keys.iter().map(|key| TrackedTargetKeyInfo {\n key: key.key.clone(),\n additional_key: key.additional_key.clone(),\n process_ordinal,\n fingerprint: None,\n }));\n new_staging_target_keys.push((target_id, new_staging_keys_info));\n\n if let Some(export_op) = target_tracking_info.export_op {\n let mut mutation = target_tracking_info.mutation;\n 
mutation.deletes.reserve(legacy_keys.len());\n for legacy_key in legacy_keys.into_iter() {\n let key = value::Value::::from_json(\n legacy_key.key,\n &export_op.primary_key_type,\n )?\n .as_key()?;\n mutation.deletes.push(interface::ExportTargetDeleteEntry {\n key,\n additional_key: legacy_key.additional_key,\n });\n }\n target_mutations.insert(target_id, mutation);\n }\n }\n\n db_tracking::precommit_source_tracking_info(\n source_id,\n source_key_json,\n process_ordinal,\n new_staging_target_keys,\n data.as_ref().map(|data| data.memoization_info),\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n\n txn.commit().await?;\n\n Ok(SkippedOr::Normal(PrecommitOutput {\n metadata: PrecommitMetadata {\n source_entry_exists: data.is_some(),\n process_ordinal,\n existing_process_ordinal,\n new_target_keys: new_target_keys_info,\n },\n target_mutations,\n }))\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fingerprint: &[u8],\n precommit_metadata: PrecommitMetadata,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_commit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n let tracking_info_exists = tracking_info.is_some();\n if tracking_info.as_ref().and_then(|info| info.process_ordinal)\n >= Some(precommit_metadata.process_ordinal)\n {\n return Ok(());\n }\n\n let cleaned_staging_target_keys = tracking_info\n .map(|info| {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n staging_target_keys\n .into_iter()\n .filter_map(|(target_id, target_keys)| {\n let cleaned_target_keys: Vec<_> = target_keys\n .into_iter()\n .filter(|key_info| {\n 
Some(key_info.process_ordinal)\n > precommit_metadata.existing_process_ordinal\n && key_info.process_ordinal != precommit_metadata.process_ordinal\n })\n .collect();\n if !cleaned_target_keys.is_empty() {\n Some((target_id, cleaned_target_keys))\n } else {\n None\n }\n })\n .collect::>()\n })\n .unwrap_or_default();\n if !precommit_metadata.source_entry_exists && cleaned_staging_target_keys.is_empty() {\n // TODO: When we support distributed execution in the future, we'll need to leave a tombstone for a while\n // to prevent an earlier update causing the record reappear because of out-of-order processing.\n if tracking_info_exists {\n db_tracking::delete_source_tracking_info(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n }\n } else {\n db_tracking::commit_source_tracking_info(\n source_id,\n source_key_json,\n cleaned_staging_target_keys,\n source_version.ordinal.into(),\n logic_fingerprint,\n precommit_metadata.process_ordinal,\n process_timestamp.timestamp_micros(),\n precommit_metadata.new_target_keys,\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n }\n\n txn.commit().await?;\n\n Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn try_content_hash_optimization(\n source_id: i32,\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n current_hash: &crate::utils::fingerprint::Fingerprint,\n tracking_info: &db_tracking::SourceTrackingInfoForProcessing,\n existing_version: &Option,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result>> {\n // Check if we can use content hash optimization\n if existing_version\n .as_ref()\n .is_none_or(|v| v.kind != SourceVersionKind::CurrentLogic)\n {\n return Ok(None);\n }\n\n if tracking_info\n .max_process_ordinal\n .zip(tracking_info.process_ordinal)\n .is_none_or(|(max_ord, 
proc_ord)| max_ord != proc_ord)\n {\n return Ok(None);\n }\n\n let existing_hash = tracking_info\n .memoization_info\n .as_ref()\n .and_then(|info| info.0.as_ref())\n .and_then(|stored_info| stored_info.content_hash.as_ref());\n\n if existing_hash != Some(current_hash) {\n return Ok(None);\n }\n\n // Content hash matches - try optimization\n let mut txn = pool.begin().await?;\n\n let current_tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n let Some(current_tracking_info) = current_tracking_info else {\n return Ok(None);\n };\n\n // Check 1: Same check as precommit - verify no newer version exists\n let current_source_version = SourceVersion::from_stored_precommit_info(\n ¤t_tracking_info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n if current_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(Some(SkippedOr::Skipped(current_source_version)));\n }\n\n // Check 2: Verify process_ordinal hasn't changed (no concurrent processing)\n let original_process_ordinal = tracking_info.process_ordinal;\n if current_tracking_info.process_ordinal != original_process_ordinal {\n return Ok(None);\n }\n\n // Safe to apply optimization - just update tracking table\n db_tracking::update_source_tracking_ordinal(\n source_id,\n source_key_json,\n source_version.ordinal.0,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n txn.commit().await?;\n update_stats.num_no_change.inc(1);\n Ok(Some(SkippedOr::Normal(())))\n}\n\npub async fn evaluate_source_entry_with_memory(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n options: EvaluationMemoryOptions,\n pool: &PgPool,\n) -> Result> {\n let stored_info = if options.enable_cache || !options.evaluation_only {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n let 
existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n existing_tracking_info\n .and_then(|info| info.memoization_info.map(|info| info.0))\n .flatten()\n } else {\n None\n };\n let memory = EvaluationMemory::new(chrono::Utc::now(), stored_info, options);\n let source_value = src_eval_ctx\n .import_op\n .executor\n .get_value(\n src_eval_ctx.key,\n &SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: false,\n },\n )\n .await?\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value not returned\"))?;\n let output = match source_value {\n interface::SourceValue::Existence(source_value) => {\n Some(evaluate_source_entry(src_eval_ctx, source_value, &memory).await?)\n }\n interface::SourceValue::NonExistence => None,\n };\n Ok(output)\n}\n\npub async fn update_source_row(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n source_value: interface::SourceValue,\n source_version: &SourceVersion,\n pool: &PgPool,\n update_stats: &stats::UpdateStats,\n) -> Result> {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let process_time = chrono::Utc::now();\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n\n // Phase 1: Check existing tracking info and apply optimizations\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n let existing_version = match &existing_tracking_info {\n Some(info) => {\n let existing_version = SourceVersion::from_stored_processing_info(\n info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n\n // First check ordinal-based skipping\n if existing_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_version));\n }\n\n 
Some(existing_version)\n }\n None => None,\n };\n\n // Compute content hash once if needed for both optimization and evaluation\n let current_content_hash = match &source_value {\n interface::SourceValue::Existence(source_value) => Some(\n Fingerprinter::default()\n .with(source_value)?\n .into_fingerprint(),\n ),\n interface::SourceValue::NonExistence => None,\n };\n\n if let (Some(current_hash), Some(existing_tracking_info)) =\n (¤t_content_hash, &existing_tracking_info)\n {\n if let Some(optimization_result) = try_content_hash_optimization(\n source_id,\n src_eval_ctx,\n &source_key_json,\n source_version,\n current_hash,\n existing_tracking_info,\n &existing_version,\n &setup_execution_ctx.setup_state.tracking_table,\n update_stats,\n pool,\n )\n .await?\n {\n return Ok(optimization_result);\n }\n }\n\n let (output, stored_mem_info) = {\n let extracted_memoization_info = existing_tracking_info\n .and_then(|info| info.memoization_info)\n .and_then(|info| info.0);\n\n match source_value {\n interface::SourceValue::Existence(source_value) => {\n let evaluation_memory = EvaluationMemory::new(\n process_time,\n extracted_memoization_info,\n EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: false,\n },\n );\n\n let output =\n evaluate_source_entry(src_eval_ctx, source_value, &evaluation_memory).await?;\n let mut stored_info = evaluation_memory.into_stored()?;\n stored_info.content_hash = current_content_hash;\n\n (Some(output), stored_info)\n }\n interface::SourceValue::NonExistence => (None, Default::default()),\n }\n };\n\n // Phase 2 (precommit): Update with the memoization info and stage target keys.\n let precommit_output = precommit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n src_eval_ctx.plan.logic_fingerprint,\n output.as_ref().map(|scope_value| PrecommitData {\n evaluate_output: scope_value,\n memoization_info: &stored_mem_info,\n }),\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n 
&src_eval_ctx.plan.export_ops,\n &setup_execution_ctx.export_ops,\n update_stats,\n pool,\n )\n .await?;\n let precommit_output = match precommit_output {\n SkippedOr::Normal(output) => output,\n SkippedOr::Skipped(source_version) => return Ok(SkippedOr::Skipped(source_version)),\n };\n\n // Phase 3: Apply changes to the target storage, including upserting new target records and removing existing ones.\n let mut target_mutations = precommit_output.target_mutations;\n let apply_futs = src_eval_ctx\n .plan\n .export_op_groups\n .iter()\n .filter_map(|export_op_group| {\n let mutations_w_ctx: Vec<_> = export_op_group\n .op_idx\n .iter()\n .filter_map(|export_op_idx| {\n let export_op = &src_eval_ctx.plan.export_ops[*export_op_idx];\n target_mutations\n .remove(&setup_execution_ctx.export_ops[*export_op_idx].target_id)\n .filter(|m| !m.is_empty())\n .map(|mutation| interface::ExportTargetMutationWithContext {\n mutation,\n export_context: export_op.export_context.as_ref(),\n })\n })\n .collect();\n (!mutations_w_ctx.is_empty()).then(|| {\n export_op_group\n .target_factory\n .apply_mutation(mutations_w_ctx)\n })\n });\n\n // TODO: Handle errors.\n try_join_all(apply_futs).await?;\n\n // Phase 4: Update the tracking record.\n commit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n &src_eval_ctx.plan.logic_fingerprint.0,\n precommit_output.metadata,\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n if let Some(existing_version) = existing_version {\n if output.is_some() {\n if !source_version.ordinal.is_available()\n || source_version.ordinal != existing_version.ordinal\n {\n update_stats.num_updates.inc(1);\n } else {\n update_stats.num_reprocesses.inc(1);\n }\n } else {\n update_stats.num_deletions.inc(1);\n }\n } else if output.is_some() {\n update_stats.num_insertions.inc(1);\n }\n\n Ok(SkippedOr::Normal(()))\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn 
test_github_actions_scenario_ordinal_behavior() {\n // Test ordinal-based behavior - should_skip only cares about ordinal monotonic invariance\n // Content hash optimization is handled at update_source_row level\n\n let processed_version = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // GitHub Actions checkout: timestamp changes but content same\n let after_checkout_version = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip at should_skip level (ordinal is newer - monotonic invariance)\n // Content hash optimization happens at update_source_row level to update only tracking\n assert!(!processed_version.should_skip(&after_checkout_version, None));\n\n // Reverse case: if we somehow get an older ordinal, always skip\n assert!(after_checkout_version.should_skip(&processed_version, None));\n\n // Now simulate actual content change\n let content_changed_version = SourceVersion {\n ordinal: Ordinal(Some(3000)), // Even newer timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip processing (ordinal is newer)\n assert!(!processed_version.should_skip(&content_changed_version, None));\n }\n\n #[test]\n fn test_content_hash_computation() {\n use crate::base::value::{BasicValue, FieldValues, Value};\n use crate::utils::fingerprint::Fingerprinter;\n\n // Test that content hash is computed correctly from source data\n let source_data1 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data2 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data3 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"World\".into())), // Different content\n Value::Basic(BasicValue::Int64(42)),\n ],\n 
};\n\n let hash1 = Fingerprinter::default()\n .with(&source_data1)\n .unwrap()\n .into_fingerprint();\n\n let hash2 = Fingerprinter::default()\n .with(&source_data2)\n .unwrap()\n .into_fingerprint();\n\n let hash3 = Fingerprinter::default()\n .with(&source_data3)\n .unwrap()\n .into_fingerprint();\n\n // Same content should produce same hash\n assert_eq!(hash1, hash2);\n\n // Different content should produce different hash\n assert_ne!(hash1, hash3);\n assert_ne!(hash2, hash3);\n }\n\n #[test]\n fn test_github_actions_content_hash_optimization_requirements() {\n // This test documents the exact requirements for GitHub Actions scenario\n // where file modification times change but content remains the same\n\n use crate::utils::fingerprint::Fingerprinter;\n\n // Simulate file content that remains the same across GitHub Actions checkout\n let file_content = \"const hello = 'world';\\nexport default hello;\";\n\n // Hash before checkout (original file)\n let hash_before_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Hash after checkout (same content, different timestamp)\n let hash_after_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Content hashes must be identical for optimization to work\n assert_eq!(\n hash_before_checkout, hash_after_checkout,\n \"Content hash optimization requires identical hashes for same content\"\n );\n\n // Test with slightly different content (should produce different hashes)\n let modified_content = \"const hello = 'world!';\\nexport default hello;\"; // Added !\n let hash_modified = Fingerprinter::default()\n .with(&modified_content)\n .unwrap()\n .into_fingerprint();\n\n assert_ne!(\n hash_before_checkout, hash_modified,\n \"Different content should produce different hashes\"\n );\n }\n\n #[test]\n fn test_github_actions_ordinal_behavior_with_content_optimization() {\n // Test the complete GitHub Actions scenario:\n // 1. 
File processed with ordinal=1000, content_hash=ABC\n // 2. GitHub Actions checkout: ordinal=2000, content_hash=ABC (same content)\n // 3. Should use content hash optimization (update only tracking, skip evaluation)\n\n let original_processing = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original file timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n let after_github_checkout = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Step 1: Ordinal check should NOT skip (newer ordinal means potential processing needed)\n assert!(\n !original_processing.should_skip(&after_github_checkout, None),\n \"GitHub Actions: newer ordinal should not be skipped at ordinal level\"\n );\n\n // Step 2: Content hash optimization should trigger when content is same\n // This is tested in the integration level - the optimization path should:\n // - Compare content hashes\n // - If same: update only tracking info (process_ordinal, process_time)\n // - Skip expensive evaluation and target storage updates\n\n // Step 3: After optimization, tracking shows the new ordinal\n let after_optimization = SourceVersion {\n ordinal: Ordinal(Some(2000)), // Updated to new ordinal\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Future requests with same ordinal should be skipped\n assert!(\n after_optimization.should_skip(&after_github_checkout, None),\n \"After optimization, same ordinal should be skipped\"\n );\n }\n}\n"], ["/cocoindex/src/builder/analyzer.rs", "use crate::builder::exec_ctx::AnalyzedSetupState;\nuse crate::ops::get_executor_factory;\nuse crate::prelude::*;\n\nuse super::plan::*;\nuse crate::lib_context::get_auth_registry;\nuse crate::utils::fingerprint::Fingerprinter;\nuse crate::{\n base::{schema::*, spec::*},\n ops::interface::*,\n};\nuse futures::future::{BoxFuture, try_join3};\nuse futures::{FutureExt, future::try_join_all};\n\n#[derive(Debug)]\npub(super) enum ValueTypeBuilder 
{\n Basic(BasicValueType),\n Struct(StructSchemaBuilder),\n Table(TableSchemaBuilder),\n}\n\nimpl TryFrom<&ValueType> for ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_from(value_type: &ValueType) -> Result {\n match value_type {\n ValueType::Basic(basic_type) => Ok(ValueTypeBuilder::Basic(basic_type.clone())),\n ValueType::Struct(struct_type) => Ok(ValueTypeBuilder::Struct(struct_type.try_into()?)),\n ValueType::Table(table_type) => Ok(ValueTypeBuilder::Table(table_type.try_into()?)),\n }\n }\n}\n\nimpl TryInto for &ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n match self {\n ValueTypeBuilder::Basic(basic_type) => Ok(ValueType::Basic(basic_type.clone())),\n ValueTypeBuilder::Struct(struct_type) => Ok(ValueType::Struct(struct_type.try_into()?)),\n ValueTypeBuilder::Table(table_type) => Ok(ValueType::Table(table_type.try_into()?)),\n }\n }\n}\n\n#[derive(Default, Debug)]\npub(super) struct StructSchemaBuilder {\n fields: Vec>,\n field_name_idx: HashMap,\n description: Option>,\n}\n\nimpl StructSchemaBuilder {\n fn add_field(&mut self, field: FieldSchema) -> Result {\n let field_idx = self.fields.len() as u32;\n match self.field_name_idx.entry(field.name.clone()) {\n std::collections::hash_map::Entry::Occupied(_) => {\n bail!(\"Field name already exists: {}\", field.name);\n }\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(field_idx);\n }\n }\n self.fields.push(field);\n Ok(field_idx)\n }\n\n pub fn find_field(&self, field_name: &'_ str) -> Option<(u32, &FieldSchema)> {\n self.field_name_idx\n .get(field_name)\n .map(|&field_idx| (field_idx, &self.fields[field_idx as usize]))\n }\n}\n\nimpl TryFrom<&StructSchema> for StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &StructSchema) -> Result {\n let mut result = StructSchemaBuilder {\n fields: Vec::with_capacity(schema.fields.len()),\n field_name_idx: HashMap::with_capacity(schema.fields.len()),\n description: 
schema.description.clone(),\n };\n for field in schema.fields.iter() {\n result.add_field(FieldSchema::::from_alternative(field)?)?;\n }\n Ok(result)\n }\n}\n\nimpl TryInto for &StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n Ok(StructSchema {\n fields: Arc::new(\n self.fields\n .iter()\n .map(FieldSchema::::from_alternative)\n .collect::>>()?,\n ),\n description: self.description.clone(),\n })\n }\n}\n\n#[derive(Debug)]\npub(super) struct TableSchemaBuilder {\n pub kind: TableKind,\n pub sub_scope: Arc>,\n}\n\nimpl TryFrom<&TableSchema> for TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &TableSchema) -> Result {\n Ok(Self {\n kind: schema.kind,\n sub_scope: Arc::new(Mutex::new(DataScopeBuilder {\n data: (&schema.row).try_into()?,\n })),\n })\n }\n}\n\nimpl TryInto for &TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n let sub_scope = self.sub_scope.lock().unwrap();\n let row = (&sub_scope.data).try_into()?;\n Ok(TableSchema {\n kind: self.kind,\n row,\n })\n }\n}\n\nfn try_make_common_value_type(\n value_type1: &EnrichedValueType,\n value_type2: &EnrichedValueType,\n) -> Result {\n let typ = match (&value_type1.typ, &value_type2.typ) {\n (ValueType::Basic(basic_type1), ValueType::Basic(basic_type2)) => {\n if basic_type1 != basic_type2 {\n api_bail!(\"Value types are not compatible: {basic_type1} vs {basic_type2}\");\n }\n ValueType::Basic(basic_type1.clone())\n }\n (ValueType::Struct(struct_type1), ValueType::Struct(struct_type2)) => {\n let common_schema = try_merge_struct_schemas(struct_type1, struct_type2)?;\n ValueType::Struct(common_schema)\n }\n (ValueType::Table(table_type1), ValueType::Table(table_type2)) => {\n if table_type1.kind != table_type2.kind {\n api_bail!(\n \"Collection types are not compatible: {} vs {}\",\n table_type1,\n table_type2\n );\n }\n let row = try_merge_struct_schemas(&table_type1.row, &table_type2.row)?;\n 
ValueType::Table(TableSchema {\n kind: table_type1.kind,\n row,\n })\n }\n (t1 @ (ValueType::Basic(_) | ValueType::Struct(_) | ValueType::Table(_)), t2) => {\n api_bail!(\"Unmatched types:\\n {t1}\\n {t2}\\n\",)\n }\n };\n let common_attrs: Vec<_> = value_type1\n .attrs\n .iter()\n .filter_map(|(k, v)| {\n if value_type2.attrs.get(k) == Some(v) {\n Some((k, v))\n } else {\n None\n }\n })\n .collect();\n let attrs = if common_attrs.len() == value_type1.attrs.len() {\n value_type1.attrs.clone()\n } else {\n Arc::new(\n common_attrs\n .into_iter()\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect(),\n )\n };\n\n Ok(EnrichedValueType {\n typ,\n nullable: value_type1.nullable || value_type2.nullable,\n attrs,\n })\n}\n\nfn try_merge_fields_schemas(\n schema1: &[FieldSchema],\n schema2: &[FieldSchema],\n) -> Result> {\n if schema1.len() != schema2.len() {\n api_bail!(\n \"Fields are not compatible as they have different fields count:\\n ({})\\n ({})\\n\",\n schema1\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \"),\n schema2\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \")\n );\n }\n let mut result_fields = Vec::with_capacity(schema1.len());\n for (field1, field2) in schema1.iter().zip(schema2.iter()) {\n if field1.name != field2.name {\n api_bail!(\n \"Structs are not compatible as they have incompatible field names `{}` vs `{}`\",\n field1.name,\n field2.name\n );\n }\n result_fields.push(FieldSchema {\n name: field1.name.clone(),\n value_type: try_make_common_value_type(&field1.value_type, &field2.value_type)?,\n });\n }\n Ok(result_fields)\n}\n\nfn try_merge_struct_schemas(\n schema1: &StructSchema,\n schema2: &StructSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(StructSchema {\n fields: Arc::new(fields),\n description: schema1\n .description\n .clone()\n .or_else(|| schema2.description.clone()),\n })\n}\n\nfn try_merge_collector_schemas(\n schema1: &CollectorSchema,\n 
schema2: &CollectorSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(CollectorSchema {\n fields,\n auto_uuid_field_idx: if schema1.auto_uuid_field_idx == schema2.auto_uuid_field_idx {\n schema1.auto_uuid_field_idx\n } else {\n None\n },\n })\n}\n\n#[derive(Debug)]\npub(super) struct CollectorBuilder {\n pub schema: Arc,\n pub is_used: bool,\n}\n\nimpl CollectorBuilder {\n pub fn new(schema: Arc) -> Self {\n Self {\n schema,\n is_used: false,\n }\n }\n\n pub fn merge_schema(&mut self, schema: &CollectorSchema) -> Result<()> {\n if self.is_used {\n api_bail!(\"Collector is already used\");\n }\n let existing_schema = Arc::make_mut(&mut self.schema);\n *existing_schema = try_merge_collector_schemas(existing_schema, schema)?;\n Ok(())\n }\n\n pub fn use_schema(&mut self) -> Arc {\n self.is_used = true;\n self.schema.clone()\n }\n}\n\n#[derive(Debug)]\npub(super) struct DataScopeBuilder {\n pub data: StructSchemaBuilder,\n}\n\nimpl DataScopeBuilder {\n pub fn new() -> Self {\n Self {\n data: Default::default(),\n }\n }\n\n pub fn last_field(&self) -> Option<&FieldSchema> {\n self.data.fields.last()\n }\n\n pub fn add_field(\n &mut self,\n name: FieldName,\n value_type: &EnrichedValueType,\n ) -> Result {\n let field_index = self.data.add_field(FieldSchema {\n name,\n value_type: EnrichedValueType::from_alternative(value_type)?,\n })?;\n Ok(AnalyzedOpOutput {\n field_idx: field_index,\n })\n }\n\n pub fn analyze_field_path<'a>(\n &'a self,\n field_path: &'_ FieldPath,\n ) -> Result<(\n AnalyzedLocalFieldReference,\n &'a EnrichedValueType,\n )> {\n let mut indices = Vec::with_capacity(field_path.len());\n let mut struct_schema = &self.data;\n\n let mut i = 0;\n let value_type = loop {\n let field_name = &field_path[i];\n let (field_idx, field) = struct_schema.find_field(field_name).ok_or_else(|| {\n api_error!(\"Field {} not found\", field_path[0..(i + 1)].join(\".\"))\n })?;\n indices.push(field_idx);\n if i + 1 >= 
field_path.len() {\n break &field.value_type;\n }\n i += 1;\n\n struct_schema = match &field.value_type.typ {\n ValueTypeBuilder::Struct(struct_type) => struct_type,\n _ => {\n api_bail!(\"Field {} is not a struct\", field_path[0..(i + 1)].join(\".\"));\n }\n };\n };\n Ok((\n AnalyzedLocalFieldReference {\n fields_idx: indices,\n },\n value_type,\n ))\n }\n}\n\npub(super) struct AnalyzerContext {\n pub lib_ctx: Arc,\n pub flow_ctx: Arc,\n}\n\n#[derive(Debug, Default)]\npub(super) struct OpScopeStates {\n pub op_output_types: HashMap,\n pub collectors: IndexMap,\n pub sub_scopes: HashMap>,\n}\n\nimpl OpScopeStates {\n pub fn add_collector(\n &mut self,\n collector_name: FieldName,\n schema: CollectorSchema,\n ) -> Result {\n let existing_len = self.collectors.len();\n let idx = match self.collectors.entry(collector_name) {\n indexmap::map::Entry::Occupied(mut entry) => {\n entry.get_mut().merge_schema(&schema)?;\n entry.index()\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(CollectorBuilder::new(Arc::new(schema)));\n existing_len\n }\n };\n Ok(AnalyzedLocalCollectorReference {\n collector_idx: idx as u32,\n })\n }\n\n pub fn consume_collector(\n &mut self,\n collector_name: &FieldName,\n ) -> Result<(AnalyzedLocalCollectorReference, Arc)> {\n let (collector_idx, _, collector) = self\n .collectors\n .get_full_mut(collector_name)\n .ok_or_else(|| api_error!(\"Collector not found: {}\", collector_name))?;\n Ok((\n AnalyzedLocalCollectorReference {\n collector_idx: collector_idx as u32,\n },\n collector.use_schema(),\n ))\n }\n\n fn build_op_scope_schema(&self) -> OpScopeSchema {\n OpScopeSchema {\n op_output_types: self\n .op_output_types\n .iter()\n .map(|(name, value_type)| (name.clone(), value_type.without_attrs()))\n .collect(),\n collectors: self\n .collectors\n .iter()\n .map(|(name, schema)| NamedSpec {\n name: name.clone(),\n spec: schema.schema.clone(),\n })\n .collect(),\n op_scopes: self.sub_scopes.clone(),\n }\n }\n}\n\n#[derive(Debug)]\npub 
struct OpScope {\n pub name: String,\n pub parent: Option<(Arc, spec::FieldPath)>,\n pub(super) data: Arc>,\n pub(super) states: Mutex,\n}\n\nstruct Iter<'a>(Option<&'a OpScope>);\n\nimpl<'a> Iterator for Iter<'a> {\n type Item = &'a OpScope;\n\n fn next(&mut self) -> Option {\n match self.0 {\n Some(scope) => {\n self.0 = scope.parent.as_ref().map(|(parent, _)| parent.as_ref());\n Some(scope)\n }\n None => None,\n }\n }\n}\n\nimpl OpScope {\n pub(super) fn new(\n name: String,\n parent: Option<(Arc, spec::FieldPath)>,\n data: Arc>,\n ) -> Arc {\n Arc::new(Self {\n name,\n parent,\n data,\n states: Mutex::default(),\n })\n }\n\n fn add_op_output(\n &self,\n name: FieldName,\n value_type: EnrichedValueType,\n ) -> Result {\n let op_output = self\n .data\n .lock()\n .unwrap()\n .add_field(name.clone(), &value_type)?;\n self.states\n .lock()\n .unwrap()\n .op_output_types\n .insert(name, value_type);\n Ok(op_output)\n }\n\n pub fn ancestors(&self) -> impl Iterator {\n Iter(Some(self))\n }\n\n pub fn is_op_scope_descendant(&self, other: &Self) -> bool {\n if self == other {\n return true;\n }\n match &self.parent {\n Some((parent, _)) => parent.is_op_scope_descendant(other),\n None => false,\n }\n }\n\n pub(super) fn new_foreach_op_scope(\n self: &Arc,\n scope_name: String,\n field_path: &FieldPath,\n ) -> Result<(AnalyzedLocalFieldReference, Arc)> {\n let (local_field_ref, sub_data_scope) = {\n let data_scope = self.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(field_path)?;\n let sub_data_scope = match &value_type.typ {\n ValueTypeBuilder::Table(table_type) => table_type.sub_scope.clone(),\n _ => api_bail!(\"ForEach only works on collection, field {field_path} is not\"),\n };\n (local_field_ref, sub_data_scope)\n };\n let sub_op_scope = OpScope::new(\n scope_name,\n Some((self.clone(), field_path.clone())),\n sub_data_scope,\n );\n Ok((local_field_ref, sub_op_scope))\n }\n}\n\nimpl std::fmt::Display for OpScope {\n fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n if let Some((scope, field_path)) = &self.parent {\n write!(f, \"{} [{} AS {}]\", scope, field_path, self.name)?;\n } else {\n write!(f, \"[{}]\", self.name)?;\n }\n Ok(())\n }\n}\n\nimpl PartialEq for OpScope {\n fn eq(&self, other: &Self) -> bool {\n std::ptr::eq(self, other)\n }\n}\nimpl Eq for OpScope {}\n\nfn find_scope<'a>(scope_name: &ScopeName, op_scope: &'a OpScope) -> Result<(u32, &'a OpScope)> {\n let (up_level, scope) = op_scope\n .ancestors()\n .enumerate()\n .find(|(_, s)| &s.name == scope_name)\n .ok_or_else(|| api_error!(\"Scope not found: {}\", scope_name))?;\n Ok((up_level as u32, scope))\n}\n\nfn analyze_struct_mapping(\n mapping: &StructMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedStructMapping, Vec)> {\n let mut field_mappings = Vec::with_capacity(mapping.fields.len());\n let mut field_schemas = Vec::with_capacity(mapping.fields.len());\n for field in mapping.fields.iter() {\n let (field_mapping, value_type) = analyze_value_mapping(&field.spec, op_scope)?;\n field_mappings.push(field_mapping);\n field_schemas.push(FieldSchema {\n name: field.name.clone(),\n value_type,\n });\n }\n Ok((\n AnalyzedStructMapping {\n fields: field_mappings,\n },\n field_schemas,\n ))\n}\n\nfn analyze_value_mapping(\n value_mapping: &ValueMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedValueMapping, EnrichedValueType)> {\n let result = match value_mapping {\n ValueMapping::Constant(v) => {\n let value = value::Value::from_json(v.value.clone(), &v.schema.typ)?;\n (AnalyzedValueMapping::Constant { value }, v.schema.clone())\n }\n\n ValueMapping::Field(v) => {\n let (scope_up_level, op_scope) = match &v.scope {\n Some(scope_name) => find_scope(scope_name, op_scope)?,\n None => (0, op_scope),\n };\n let data_scope = op_scope.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(&v.field_path)?;\n (\n AnalyzedValueMapping::Field(AnalyzedFieldReference {\n 
local: local_field_ref,\n scope_up_level,\n }),\n EnrichedValueType::from_alternative(value_type)?,\n )\n }\n\n ValueMapping::Struct(v) => {\n let (struct_mapping, field_schemas) = analyze_struct_mapping(v, op_scope)?;\n (\n AnalyzedValueMapping::Struct(struct_mapping),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(field_schemas),\n description: None,\n }),\n nullable: false,\n attrs: Default::default(),\n },\n )\n }\n };\n Ok(result)\n}\n\nfn analyze_input_fields(\n arg_bindings: &[OpArgBinding],\n op_scope: &OpScope,\n) -> Result> {\n let mut input_field_schemas = Vec::with_capacity(arg_bindings.len());\n for arg_binding in arg_bindings.iter() {\n let (analyzed_value, value_type) = analyze_value_mapping(&arg_binding.value, op_scope)?;\n input_field_schemas.push(OpArgSchema {\n name: arg_binding.arg_name.clone(),\n value_type,\n analyzed_value: analyzed_value.clone(),\n });\n }\n Ok(input_field_schemas)\n}\n\nfn add_collector(\n scope_name: &ScopeName,\n collector_name: FieldName,\n schema: CollectorSchema,\n op_scope: &OpScope,\n) -> Result {\n let (scope_up_level, scope) = find_scope(scope_name, op_scope)?;\n let local_ref = scope\n .states\n .lock()\n .unwrap()\n .add_collector(collector_name, schema)?;\n Ok(AnalyzedCollectorReference {\n local: local_ref,\n scope_up_level,\n })\n}\n\nstruct ExportDataFieldsInfo {\n local_collector_ref: AnalyzedLocalCollectorReference,\n primary_key_def: AnalyzedPrimaryKeyDef,\n primary_key_type: ValueType,\n value_fields_idx: Vec,\n value_stable: bool,\n}\n\nimpl AnalyzerContext {\n pub(super) async fn analyze_import_op(\n &self,\n op_scope: &Arc,\n import_op: NamedSpec,\n ) -> Result> + Send + use<>> {\n let source_factory = match get_executor_factory(&import_op.spec.source.kind)? 
{\n ExecutorFactory::Source(source_executor) => source_executor,\n _ => {\n return Err(anyhow::anyhow!(\n \"`{}` is not a source op\",\n import_op.spec.source.kind\n ));\n }\n };\n let (output_type, executor) = source_factory\n .build(\n serde_json::Value::Object(import_op.spec.source.spec),\n self.flow_ctx.clone(),\n )\n .await?;\n\n let op_name = import_op.name.clone();\n let primary_key_type = output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key: {op_name}\"))?\n .typ\n .clone();\n let output = op_scope.add_op_output(import_op.name, output_type)?;\n\n let concur_control_options = import_op\n .spec\n .execution_options\n .get_concur_control_options();\n let global_concurrency_controller = self.lib_ctx.global_concurrency_controller.clone();\n let result_fut = async move {\n trace!(\"Start building executor for source op `{op_name}`\");\n let executor = executor.await?;\n trace!(\"Finished building executor for source op `{op_name}`\");\n Ok(AnalyzedImportOp {\n executor,\n output,\n primary_key_type,\n name: op_name,\n refresh_options: import_op.spec.refresh_options,\n concurrency_controller: concur_control::CombinedConcurrencyController::new(\n &concur_control_options,\n global_concurrency_controller,\n ),\n })\n };\n Ok(result_fut)\n }\n\n pub(super) async fn analyze_reactive_op(\n &self,\n op_scope: &Arc,\n reactive_op: &NamedSpec,\n ) -> Result>> {\n let result_fut = match &reactive_op.spec {\n ReactiveOpSpec::Transform(op) => {\n let input_field_schemas =\n analyze_input_fields(&op.inputs, op_scope).with_context(|| {\n format!(\n \"Failed to analyze inputs for transform op: {}\",\n reactive_op.name\n )\n })?;\n let spec = serde_json::Value::Object(op.op.spec.clone());\n\n match get_executor_factory(&op.op.kind)? 
{\n ExecutorFactory::SimpleFunction(fn_executor) => {\n let input_value_mappings = input_field_schemas\n .iter()\n .map(|field| field.analyzed_value.clone())\n .collect();\n let (output_enriched_type, executor) = fn_executor\n .build(spec, input_field_schemas, self.flow_ctx.clone())\n .await?;\n let logic_fingerprinter = Fingerprinter::default()\n .with(&op.op)?\n .with(&output_enriched_type.without_attrs())?;\n let output_type = output_enriched_type.typ.clone();\n let output = op_scope\n .add_op_output(reactive_op.name.clone(), output_enriched_type)?;\n let op_name = reactive_op.name.clone();\n async move {\n trace!(\"Start building executor for transform op `{op_name}`\");\n let executor = executor.await.with_context(|| {\n format!(\"Failed to build executor for transform op: {op_name}\")\n })?;\n let enable_cache = executor.enable_cache();\n let behavior_version = executor.behavior_version();\n trace!(\"Finished building executor for transform op `{op_name}`, enable cache: {enable_cache}, behavior version: {behavior_version:?}\");\n let function_exec_info = AnalyzedFunctionExecInfo {\n enable_cache,\n behavior_version,\n fingerprinter: logic_fingerprinter\n .with(&behavior_version)?,\n output_type\n };\n if function_exec_info.enable_cache\n && function_exec_info.behavior_version.is_none()\n {\n api_bail!(\n \"When caching is enabled, behavior version must be specified for transform op: {op_name}\"\n );\n }\n Ok(AnalyzedReactiveOp::Transform(AnalyzedTransformOp {\n name: op_name,\n inputs: input_value_mappings,\n function_exec_info,\n executor,\n output,\n }))\n }\n .boxed()\n }\n _ => api_bail!(\"`{}` is not a function op\", op.op.kind),\n }\n }\n\n ReactiveOpSpec::ForEach(foreach_op) => {\n let (local_field_ref, sub_op_scope) = op_scope.new_foreach_op_scope(\n foreach_op.op_scope.name.clone(),\n &foreach_op.field_path,\n )?;\n let analyzed_op_scope_fut = {\n let analyzed_op_scope_fut = self\n .analyze_op_scope(&sub_op_scope, &foreach_op.op_scope.ops)\n 
.boxed_local()\n .await?;\n let sub_op_scope_schema =\n sub_op_scope.states.lock().unwrap().build_op_scope_schema();\n op_scope.states.lock().unwrap().sub_scopes.insert(\n foreach_op.op_scope.name.clone(),\n Arc::new(sub_op_scope_schema),\n );\n analyzed_op_scope_fut\n };\n let op_name = reactive_op.name.clone();\n\n let concur_control_options =\n foreach_op.execution_options.get_concur_control_options();\n async move {\n Ok(AnalyzedReactiveOp::ForEach(AnalyzedForEachOp {\n local_field_ref,\n op_scope: analyzed_op_scope_fut\n .await\n .with_context(|| format!(\"Analyzing foreach op: {op_name}\"))?,\n name: op_name,\n concurrency_controller: concur_control::ConcurrencyController::new(\n &concur_control_options,\n ),\n }))\n }\n .boxed()\n }\n\n ReactiveOpSpec::Collect(op) => {\n let (struct_mapping, fields_schema) = analyze_struct_mapping(&op.input, op_scope)?;\n let has_auto_uuid_field = op.auto_uuid_field.is_some();\n let fingerprinter = Fingerprinter::default().with(&fields_schema)?;\n let collect_op = AnalyzedReactiveOp::Collect(AnalyzedCollectOp {\n name: reactive_op.name.clone(),\n has_auto_uuid_field,\n input: struct_mapping,\n collector_ref: add_collector(\n &op.scope_name,\n op.collector_name.clone(),\n CollectorSchema::from_fields(fields_schema, op.auto_uuid_field.clone()),\n op_scope,\n )?,\n fingerprinter,\n });\n async move { Ok(collect_op) }.boxed()\n }\n };\n Ok(result_fut)\n }\n\n #[allow(clippy::too_many_arguments)]\n async fn analyze_export_op_group(\n &self,\n target_kind: &str,\n op_scope: &Arc,\n flow_inst: &FlowInstanceSpec,\n export_op_group: &AnalyzedExportTargetOpGroup,\n declarations: Vec,\n targets_analyzed_ss: &mut [Option],\n declarations_analyzed_ss: &mut Vec,\n ) -> Result> + Send + use<>>> {\n let mut collection_specs = Vec::::new();\n let mut data_fields_infos = Vec::::new();\n for idx in export_op_group.op_idx.iter() {\n let export_op = &flow_inst.export_ops[*idx];\n let (local_collector_ref, collector_schema) = op_scope\n .states\n 
.lock()\n .unwrap()\n .consume_collector(&export_op.spec.collector_name)?;\n let (key_fields_schema, value_fields_schema, data_collection_info) =\n match &export_op.spec.index_options.primary_key_fields {\n Some(fields) => {\n let pk_fields_idx = fields\n .iter()\n .map(|f| {\n collector_schema\n .fields\n .iter()\n .position(|field| &field.name == f)\n .ok_or_else(|| anyhow!(\"field not found: {}\", f))\n })\n .collect::>>()?;\n\n let key_fields_schema = pk_fields_idx\n .iter()\n .map(|idx| collector_schema.fields[*idx].clone())\n .collect::>();\n let primary_key_type = if pk_fields_idx.len() == 1 {\n key_fields_schema[0].value_type.typ.clone()\n } else {\n ValueType::Struct(StructSchema {\n fields: Arc::from(key_fields_schema.clone()),\n description: None,\n })\n };\n let mut value_fields_schema: Vec = vec![];\n let mut value_fields_idx = vec![];\n for (idx, field) in collector_schema.fields.iter().enumerate() {\n if !pk_fields_idx.contains(&idx) {\n value_fields_schema.push(field.clone());\n value_fields_idx.push(idx as u32);\n }\n }\n let value_stable = collector_schema\n .auto_uuid_field_idx\n .as_ref()\n .map(|uuid_idx| pk_fields_idx.contains(uuid_idx))\n .unwrap_or(false);\n (\n key_fields_schema,\n value_fields_schema,\n ExportDataFieldsInfo {\n local_collector_ref,\n primary_key_def: AnalyzedPrimaryKeyDef::Fields(pk_fields_idx),\n primary_key_type,\n value_fields_idx,\n value_stable,\n },\n )\n }\n None => {\n // TODO: Support auto-generate primary key\n api_bail!(\"Primary key fields must be specified\")\n }\n };\n collection_specs.push(interface::ExportDataCollectionSpec {\n name: export_op.name.clone(),\n spec: serde_json::Value::Object(export_op.spec.target.spec.clone()),\n key_fields_schema,\n value_fields_schema,\n index_options: export_op.spec.index_options.clone(),\n });\n data_fields_infos.push(data_collection_info);\n }\n let (data_collections_output, declarations_output) = export_op_group\n .target_factory\n .clone()\n .build(collection_specs, 
declarations, self.flow_ctx.clone())\n .await?;\n let analyzed_export_ops = export_op_group\n .op_idx\n .iter()\n .zip(data_collections_output.into_iter())\n .zip(data_fields_infos.into_iter())\n .map(|((idx, data_coll_output), data_fields_info)| {\n let export_op = &flow_inst.export_ops[*idx];\n let op_name = export_op.name.clone();\n let export_target_factory = export_op_group.target_factory.clone();\n\n let export_op_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key: data_coll_output.setup_key,\n desired_setup_state: data_coll_output.desired_setup_state,\n setup_by_user: export_op.spec.setup_by_user,\n };\n targets_analyzed_ss[*idx] = Some(export_op_ss);\n\n Ok(async move {\n trace!(\"Start building executor for export op `{op_name}`\");\n let export_context = data_coll_output\n .export_context\n .await\n .with_context(|| format!(\"Analyzing export op: {op_name}\"))?;\n trace!(\"Finished building executor for export op `{op_name}`\");\n Ok(AnalyzedExportOp {\n name: op_name,\n input: data_fields_info.local_collector_ref,\n export_target_factory,\n export_context,\n primary_key_def: data_fields_info.primary_key_def,\n primary_key_type: data_fields_info.primary_key_type,\n value_fields: data_fields_info.value_fields_idx,\n value_stable: data_fields_info.value_stable,\n })\n })\n })\n .collect::>>()?;\n for (setup_key, desired_setup_state) in declarations_output {\n let decl_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key,\n desired_setup_state,\n setup_by_user: false,\n };\n declarations_analyzed_ss.push(decl_ss);\n }\n Ok(analyzed_export_ops)\n }\n\n async fn analyze_op_scope(\n &self,\n op_scope: &Arc,\n reactive_ops: &[NamedSpec],\n ) -> Result> + Send + use<>> {\n let mut op_futs = Vec::with_capacity(reactive_ops.len());\n for reactive_op in reactive_ops.iter() {\n op_futs.push(self.analyze_reactive_op(op_scope, reactive_op).await?);\n }\n let collector_len = 
op_scope.states.lock().unwrap().collectors.len();\n let result_fut = async move {\n Ok(AnalyzedOpScope {\n reactive_ops: try_join_all(op_futs).await?,\n collector_len,\n })\n };\n Ok(result_fut)\n }\n}\n\npub fn build_flow_instance_context(\n flow_inst_name: &str,\n py_exec_ctx: Option,\n) -> Arc {\n Arc::new(FlowInstanceContext {\n flow_instance_name: flow_inst_name.to_string(),\n auth_registry: get_auth_registry().clone(),\n py_exec_ctx: py_exec_ctx.map(Arc::new),\n })\n}\n\nfn build_flow_schema(root_op_scope: &OpScope) -> Result {\n let schema = (&root_op_scope.data.lock().unwrap().data).try_into()?;\n let root_op_scope_schema = root_op_scope.states.lock().unwrap().build_op_scope_schema();\n Ok(FlowSchema {\n schema,\n root_op_scope: root_op_scope_schema,\n })\n}\n\npub async fn analyze_flow(\n flow_inst: &FlowInstanceSpec,\n flow_ctx: Arc,\n) -> Result<(\n FlowSchema,\n AnalyzedSetupState,\n impl Future> + Send + use<>,\n)> {\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let root_data_scope = Arc::new(Mutex::new(DataScopeBuilder::new()));\n let root_op_scope = OpScope::new(ROOT_SCOPE_NAME.to_string(), None, root_data_scope);\n let mut import_ops_futs = Vec::with_capacity(flow_inst.import_ops.len());\n for import_op in flow_inst.import_ops.iter() {\n import_ops_futs.push(\n analyzer_ctx\n .analyze_import_op(&root_op_scope, import_op.clone())\n .await?,\n );\n }\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n\n #[derive(Default)]\n struct TargetOpGroup {\n export_op_ids: Vec,\n declarations: Vec,\n }\n let mut target_op_group = IndexMap::::new();\n for (idx, export_op) in flow_inst.export_ops.iter().enumerate() {\n target_op_group\n .entry(export_op.spec.target.kind.clone())\n .or_default()\n .export_op_ids\n .push(idx);\n }\n for declaration in flow_inst.declarations.iter() {\n target_op_group\n .entry(declaration.kind.clone())\n .or_default()\n .declarations\n 
.push(serde_json::Value::Object(declaration.spec.clone()));\n }\n\n let mut export_ops_futs = vec![];\n let mut analyzed_target_op_groups = vec![];\n\n let mut targets_analyzed_ss = Vec::with_capacity(flow_inst.export_ops.len());\n targets_analyzed_ss.resize_with(flow_inst.export_ops.len(), || None);\n\n let mut declarations_analyzed_ss = Vec::with_capacity(flow_inst.declarations.len());\n\n for (target_kind, op_ids) in target_op_group.into_iter() {\n let target_factory = match get_executor_factory(&target_kind)? {\n ExecutorFactory::ExportTarget(export_executor) => export_executor,\n _ => api_bail!(\"`{}` is not a export target op\", target_kind),\n };\n let analyzed_target_op_group = AnalyzedExportTargetOpGroup {\n target_factory,\n op_idx: op_ids.export_op_ids,\n };\n export_ops_futs.extend(\n analyzer_ctx\n .analyze_export_op_group(\n target_kind.as_str(),\n &root_op_scope,\n flow_inst,\n &analyzed_target_op_group,\n op_ids.declarations,\n &mut targets_analyzed_ss,\n &mut declarations_analyzed_ss,\n )\n .await?,\n );\n analyzed_target_op_groups.push(analyzed_target_op_group);\n }\n\n let flow_schema = build_flow_schema(&root_op_scope)?;\n let analyzed_ss = exec_ctx::AnalyzedSetupState {\n targets: targets_analyzed_ss\n .into_iter()\n .enumerate()\n .map(|(idx, v)| v.ok_or_else(|| anyhow!(\"target op `{}` not found\", idx)))\n .collect::>>()?,\n declarations: declarations_analyzed_ss,\n };\n\n let logic_fingerprint = Fingerprinter::default()\n .with(&flow_inst)?\n .with(&flow_schema.schema)?\n .into_fingerprint();\n let plan_fut = async move {\n let (import_ops, op_scope, export_ops) = try_join3(\n try_join_all(import_ops_futs),\n op_scope_fut,\n try_join_all(export_ops_futs),\n )\n .await?;\n\n Ok(ExecutionPlan {\n logic_fingerprint,\n import_ops,\n op_scope,\n export_ops,\n export_op_groups: analyzed_target_op_groups,\n })\n };\n\n Ok((flow_schema, analyzed_ss, plan_fut))\n}\n\npub async fn analyze_transient_flow<'a>(\n flow_inst: &TransientFlowSpec,\n 
flow_ctx: Arc,\n) -> Result<(\n EnrichedValueType,\n FlowSchema,\n impl Future> + Send + 'a,\n)> {\n let mut root_data_scope = DataScopeBuilder::new();\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let mut input_fields = vec![];\n for field in flow_inst.input_fields.iter() {\n let analyzed_field = root_data_scope.add_field(field.name.clone(), &field.value_type)?;\n input_fields.push(analyzed_field);\n }\n let root_op_scope = OpScope::new(\n ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(root_data_scope)),\n );\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n let (output_value, output_type) =\n analyze_value_mapping(&flow_inst.output_value, &root_op_scope)?;\n let data_schema = build_flow_schema(&root_op_scope)?;\n let plan_fut = async move {\n let op_scope = op_scope_fut.await?;\n Ok(TransientExecutionPlan {\n input_fields,\n op_scope,\n output_value,\n })\n };\n Ok((output_type, data_schema, plan_fut))\n}\n"], ["/cocoindex/src/execution/dumper.rs", "use crate::prelude::*;\n\nuse futures::{StreamExt, future::try_join_all};\nuse itertools::Itertools;\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::path::{Path, PathBuf};\nuse yaml_rust2::YamlEmitter;\n\nuse super::evaluator::SourceRowEvaluationContext;\nuse super::memoization::EvaluationMemoryOptions;\nuse super::row_indexer;\nuse crate::base::{schema, value};\nuse crate::builder::plan::{AnalyzedImportOp, ExecutionPlan};\nuse crate::ops::interface::SourceExecutorListOptions;\nuse crate::utils::yaml_ser::YamlSerializer;\n\n#[derive(Debug, Clone, Deserialize)]\npub struct EvaluateAndDumpOptions {\n pub output_dir: String,\n pub use_cache: bool,\n}\n\nconst FILENAME_PREFIX_MAX_LENGTH: usize = 128;\n\nstruct TargetExportData<'a> {\n schema: &'a Vec,\n // The purpose is to make rows sorted by primary key.\n data: BTreeMap,\n}\n\nimpl Serialize for TargetExportData<'_> {\n fn serialize(&self, 
serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(Some(self.data.len()))?;\n for (_, values) in self.data.iter() {\n seq.serialize_element(&value::TypedFieldsValue {\n schema: self.schema,\n values_iter: values.fields.iter(),\n })?;\n }\n seq.end()\n }\n}\n\n#[derive(Serialize)]\nstruct SourceOutputData<'a> {\n key: value::TypedValue<'a>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n exports: Option>>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n error: Option,\n}\n\nstruct Dumper<'a> {\n plan: &'a ExecutionPlan,\n setup_execution_ctx: &'a exec_ctx::FlowSetupExecutionContext,\n schema: &'a schema::FlowSchema,\n pool: &'a PgPool,\n options: EvaluateAndDumpOptions,\n}\n\nimpl<'a> Dumper<'a> {\n async fn evaluate_source_entry<'b>(\n &'a self,\n import_op_idx: usize,\n import_op: &'a AnalyzedImportOp,\n key: &value::KeyValue,\n collected_values_buffer: &'b mut Vec>,\n ) -> Result>>>\n where\n 'a: 'b,\n {\n let data_builder = row_indexer::evaluate_source_entry_with_memory(\n &SourceRowEvaluationContext {\n plan: self.plan,\n import_op,\n schema: self.schema,\n key,\n import_op_idx,\n },\n self.setup_execution_ctx,\n EvaluationMemoryOptions {\n enable_cache: self.options.use_cache,\n evaluation_only: true,\n },\n self.pool,\n )\n .await?;\n\n let data_builder = if let Some(data_builder) = data_builder {\n data_builder\n } else {\n return Ok(None);\n };\n\n *collected_values_buffer = data_builder.collected_values;\n let exports = self\n .plan\n .export_ops\n .iter()\n .map(|export_op| -> Result<_> {\n let collector_idx = export_op.input.collector_idx as usize;\n let entry = (\n export_op.name.as_str(),\n TargetExportData {\n schema: &self.schema.root_op_scope.collectors[collector_idx]\n .spec\n .fields,\n data: collected_values_buffer[collector_idx]\n .iter()\n .map(|v| -> Result<_> {\n let key = row_indexer::extract_primary_key(\n &export_op.primary_key_def,\n v,\n )?;\n Ok((key, v))\n })\n 
.collect::>()?,\n },\n );\n Ok(entry)\n })\n .collect::>()?;\n Ok(Some(exports))\n }\n\n async fn evaluate_and_dump_source_entry(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n key: value::KeyValue,\n file_path: PathBuf,\n ) -> Result<()> {\n let _permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n let mut collected_values_buffer = Vec::new();\n let (exports, error) = match self\n .evaluate_source_entry(import_op_idx, import_op, &key, &mut collected_values_buffer)\n .await\n {\n Ok(exports) => (exports, None),\n Err(e) => (None, Some(format!(\"{e:?}\"))),\n };\n let key_value = value::Value::from(key);\n let file_data = SourceOutputData {\n key: value::TypedValue {\n t: &import_op.primary_key_type,\n v: &key_value,\n },\n exports,\n error,\n };\n\n let yaml_output = {\n let mut yaml_output = String::new();\n let yaml_data = YamlSerializer::serialize(&file_data)?;\n let mut yaml_emitter = YamlEmitter::new(&mut yaml_output);\n yaml_emitter.multiline_strings(true);\n yaml_emitter.compact(true);\n yaml_emitter.dump(&yaml_data)?;\n yaml_output\n };\n tokio::fs::write(file_path, yaml_output).await?;\n\n Ok(())\n }\n\n async fn evaluate_and_dump_for_source(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n ) -> Result<()> {\n let mut keys_by_filename_prefix: IndexMap> = IndexMap::new();\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n while let Some(rows) = rows_stream.next().await {\n for row in rows?.into_iter() {\n let mut s = row\n .key\n .to_strs()\n .into_iter()\n .map(|s| urlencoding::encode(&s).into_owned())\n .join(\":\");\n s.truncate(\n (0..(FILENAME_PREFIX_MAX_LENGTH - import_op.name.as_str().len()))\n .rev()\n .find(|i| s.is_char_boundary(*i))\n .unwrap_or(0),\n );\n keys_by_filename_prefix.entry(s).or_default().push(row.key);\n }\n }\n let output_dir = Path::new(&self.options.output_dir);\n let evaluate_futs 
=\n keys_by_filename_prefix\n .into_iter()\n .flat_map(|(filename_prefix, keys)| {\n let num_keys = keys.len();\n keys.into_iter().enumerate().map(move |(i, key)| {\n let extra_id = if num_keys > 1 {\n Cow::Owned(format!(\".{i}\"))\n } else {\n Cow::Borrowed(\"\")\n };\n let file_name =\n format!(\"{}@{}{}.yaml\", import_op.name, filename_prefix, extra_id);\n let file_path = output_dir.join(Path::new(&file_name));\n self.evaluate_and_dump_source_entry(\n import_op_idx,\n import_op,\n key,\n file_path,\n )\n })\n });\n try_join_all(evaluate_futs).await?;\n Ok(())\n }\n\n async fn evaluate_and_dump(&self) -> Result<()> {\n try_join_all(\n self.plan\n .import_ops\n .iter()\n .enumerate()\n .map(|(idx, import_op)| self.evaluate_and_dump_for_source(idx, import_op)),\n )\n .await?;\n Ok(())\n }\n}\n\npub async fn evaluate_and_dump(\n plan: &ExecutionPlan,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n schema: &schema::FlowSchema,\n options: EvaluateAndDumpOptions,\n pool: &PgPool,\n) -> Result<()> {\n let output_dir = Path::new(&options.output_dir);\n if output_dir.exists() {\n if !output_dir.is_dir() {\n return Err(anyhow::anyhow!(\"The path exists and is not a directory\"));\n }\n } else {\n tokio::fs::create_dir(output_dir).await?;\n }\n\n let dumper = Dumper {\n plan,\n setup_execution_ctx,\n schema,\n pool,\n options,\n };\n dumper.evaluate_and_dump().await\n}\n"], ["/cocoindex/src/setup/driver.rs", "use crate::{\n lib_context::{FlowContext, FlowExecutionContext, LibSetupContext},\n ops::{\n get_optional_executor_factory,\n interface::{ExportTargetFactory, FlowInstanceContext},\n },\n prelude::*,\n};\n\nuse sqlx::PgPool;\nuse std::{\n fmt::{Debug, Display},\n str::FromStr,\n};\n\nuse super::{AllSetupStates, GlobalSetupStatus};\nuse super::{\n CombinedState, DesiredMode, ExistingMode, FlowSetupState, FlowSetupStatus, ObjectSetupStatus,\n ObjectStatus, ResourceIdentifier, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType,\n StateChange, 
TargetSetupState, db_metadata,\n};\nuse crate::execution::db_tracking_setup;\nuse crate::ops::interface::ExecutorFactory;\nuse std::fmt::Write;\n\nenum MetadataRecordType {\n FlowVersion,\n FlowMetadata,\n TrackingTable,\n Target(String),\n}\n\nimpl Display for MetadataRecordType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n MetadataRecordType::FlowVersion => f.write_str(db_metadata::FLOW_VERSION_RESOURCE_TYPE),\n MetadataRecordType::FlowMetadata => write!(f, \"FlowMetadata\"),\n MetadataRecordType::TrackingTable => write!(f, \"TrackingTable\"),\n MetadataRecordType::Target(target_id) => write!(f, \"Target:{target_id}\"),\n }\n }\n}\n\nimpl std::str::FromStr for MetadataRecordType {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n if s == db_metadata::FLOW_VERSION_RESOURCE_TYPE {\n Ok(Self::FlowVersion)\n } else if s == \"FlowMetadata\" {\n Ok(Self::FlowMetadata)\n } else if s == \"TrackingTable\" {\n Ok(Self::TrackingTable)\n } else if let Some(target_id) = s.strip_prefix(\"Target:\") {\n Ok(Self::Target(target_id.to_string()))\n } else {\n anyhow::bail!(\"Invalid MetadataRecordType string: {}\", s)\n }\n }\n}\n\nfn from_metadata_record(\n state: Option,\n staging_changes: sqlx::types::Json>>,\n legacy_state_key: Option,\n) -> Result> {\n let current: Option = state.map(serde_json::from_value).transpose()?;\n let staging: Vec> = (staging_changes.0.into_iter())\n .map(|sc| -> Result<_> {\n Ok(match sc {\n StateChange::Upsert(v) => StateChange::Upsert(serde_json::from_value(v)?),\n StateChange::Delete => StateChange::Delete,\n })\n })\n .collect::>()?;\n Ok(CombinedState {\n current,\n staging,\n legacy_state_key,\n })\n}\n\nfn get_export_target_factory(\n target_type: &str,\n) -> Option> {\n match get_optional_executor_factory(target_type) {\n Some(ExecutorFactory::ExportTarget(factory)) => Some(factory),\n _ => None,\n }\n}\n\npub async fn get_existing_setup_state(pool: &PgPool) -> Result> {\n let 
setup_metadata_records = db_metadata::read_setup_metadata(pool).await?;\n\n let setup_metadata_records = if let Some(records) = setup_metadata_records {\n records\n } else {\n return Ok(AllSetupStates::default());\n };\n\n // Group setup metadata records by flow name\n let setup_metadata_records = setup_metadata_records.into_iter().fold(\n BTreeMap::>::new(),\n |mut acc, record| {\n acc.entry(record.flow_name.clone())\n .or_default()\n .push(record);\n acc\n },\n );\n\n let flows = setup_metadata_records\n .into_iter()\n .map(|(flow_name, metadata_records)| -> anyhow::Result<_> {\n let mut flow_ss = FlowSetupState::default();\n for metadata_record in metadata_records {\n let state = metadata_record.state;\n let staging_changes = metadata_record.staging_changes;\n match MetadataRecordType::from_str(&metadata_record.resource_type)? {\n MetadataRecordType::FlowVersion => {\n flow_ss.seen_flow_metadata_version =\n db_metadata::parse_flow_version(&state);\n }\n MetadataRecordType::FlowMetadata => {\n flow_ss.metadata = from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::TrackingTable => {\n flow_ss.tracking_table =\n from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::Target(target_type) => {\n let normalized_key = {\n if let Some(factory) = get_export_target_factory(&target_type) {\n factory.normalize_setup_key(&metadata_record.key)?\n } else {\n metadata_record.key.clone()\n }\n };\n let combined_state = from_metadata_record(\n state,\n staging_changes,\n (normalized_key != metadata_record.key).then_some(metadata_record.key),\n )?;\n flow_ss.targets.insert(\n super::ResourceIdentifier {\n key: normalized_key,\n target_kind: target_type,\n },\n combined_state,\n );\n }\n }\n }\n Ok((flow_name, flow_ss))\n })\n .collect::>()?;\n\n Ok(AllSetupStates {\n has_metadata_table: true,\n flows,\n })\n}\n\nfn diff_state(\n existing_state: Option<&E>,\n desired_state: Option<&D>,\n diff: impl Fn(Option<&E>, &D) -> Option>,\n) 
-> Option>\nwhere\n E: PartialEq,\n{\n match (existing_state, desired_state) {\n (None, None) => None,\n (Some(_), None) => Some(StateChange::Delete),\n (existing_state, Some(desired_state)) => {\n if existing_state.map(|e| e == desired_state).unwrap_or(false) {\n None\n } else {\n diff(existing_state, desired_state)\n }\n }\n }\n}\n\nfn to_object_status(existing: Option, desired: Option) -> Option {\n Some(match (&existing, &desired) {\n (Some(_), None) => ObjectStatus::Deleted,\n (None, Some(_)) => ObjectStatus::New,\n (Some(_), Some(_)) => ObjectStatus::Existing,\n (None, None) => return None,\n })\n}\n\n#[derive(Debug, Default)]\nstruct GroupedResourceStates {\n desired: Option,\n existing: CombinedState,\n}\n\nfn group_resource_states<'a>(\n desired: impl Iterator,\n existing: impl Iterator)>,\n) -> Result> {\n let mut grouped: IndexMap<&'a ResourceIdentifier, GroupedResourceStates> = desired\n .into_iter()\n .map(|(key, state)| {\n (\n key,\n GroupedResourceStates {\n desired: Some(state.clone()),\n existing: CombinedState::default(),\n },\n )\n })\n .collect();\n for (key, state) in existing {\n let entry = grouped.entry(key);\n if state.current.is_some() {\n if let indexmap::map::Entry::Occupied(entry) = &entry {\n if entry.get().existing.current.is_some() {\n bail!(\"Duplicate existing state for key: {}\", entry.key());\n }\n }\n }\n let entry = entry.or_default();\n if let Some(current) = &state.current {\n entry.existing.current = Some(current.clone());\n }\n if let Some(legacy_state_key) = &state.legacy_state_key {\n if entry\n .existing\n .legacy_state_key\n .as_ref()\n .is_some_and(|v| v != legacy_state_key)\n {\n warn!(\n \"inconsistent legacy key: {:?}, {:?}\",\n key, entry.existing.legacy_state_key\n );\n }\n entry.existing.legacy_state_key = Some(legacy_state_key.clone());\n }\n for s in state.staging.iter() {\n match s {\n StateChange::Upsert(v) => {\n entry.existing.staging.push(StateChange::Upsert(v.clone()))\n }\n StateChange::Delete => 
entry.existing.staging.push(StateChange::Delete),\n }\n }\n }\n Ok(grouped)\n}\n\npub async fn check_flow_setup_status(\n desired_state: Option<&FlowSetupState>,\n existing_state: Option<&FlowSetupState>,\n flow_instance_ctx: &Arc,\n) -> Result {\n let metadata_change = diff_state(\n existing_state.map(|e| &e.metadata),\n desired_state.map(|d| &d.metadata),\n |_, desired_state| Some(StateChange::Upsert(desired_state.clone())),\n );\n\n let new_source_ids = desired_state\n .iter()\n .flat_map(|d| d.metadata.sources.values().map(|v| v.source_id))\n .collect::>();\n let tracking_table_change = db_tracking_setup::TrackingTableSetupStatus::new(\n desired_state.map(|d| &d.tracking_table),\n &existing_state\n .map(|e| Cow::Borrowed(&e.tracking_table))\n .unwrap_or_default(),\n (existing_state.iter())\n .flat_map(|state| state.metadata.possible_versions())\n .flat_map(|metadata| {\n metadata\n .sources\n .values()\n .map(|v| v.source_id)\n .filter(|id| !new_source_ids.contains(id))\n })\n .collect::>()\n .into_iter()\n .collect(),\n );\n\n let mut target_resources = Vec::new();\n let mut unknown_resources = Vec::new();\n\n let grouped_target_resources = group_resource_states(\n desired_state.iter().flat_map(|d| d.targets.iter()),\n existing_state.iter().flat_map(|e| e.targets.iter()),\n )?;\n for (resource_id, v) in grouped_target_resources.into_iter() {\n let factory = match get_export_target_factory(&resource_id.target_kind) {\n Some(factory) => factory,\n None => {\n unknown_resources.push(resource_id.clone());\n continue;\n }\n };\n let state = v.desired.clone();\n let target_state = v\n .desired\n .and_then(|state| (!state.common.setup_by_user).then_some(state.state));\n let existing_without_setup_by_user = CombinedState {\n current: v\n .existing\n .current\n .and_then(|s| s.state_unless_setup_by_user()),\n staging: v\n .existing\n .staging\n .into_iter()\n .filter_map(|s| match s {\n StateChange::Upsert(s) => {\n 
s.state_unless_setup_by_user().map(StateChange::Upsert)\n }\n StateChange::Delete => Some(StateChange::Delete),\n })\n .collect(),\n legacy_state_key: v.existing.legacy_state_key.clone(),\n };\n let never_setup_by_sys = target_state.is_none()\n && existing_without_setup_by_user.current.is_none()\n && existing_without_setup_by_user.staging.is_empty();\n let setup_status = if never_setup_by_sys {\n None\n } else {\n Some(\n factory\n .check_setup_status(\n &resource_id.key,\n target_state,\n existing_without_setup_by_user,\n flow_instance_ctx.clone(),\n )\n .await?,\n )\n };\n target_resources.push(ResourceSetupInfo {\n key: resource_id.clone(),\n state,\n description: factory.describe_resource(&resource_id.key)?,\n setup_status,\n legacy_key: v\n .existing\n .legacy_state_key\n .map(|legacy_state_key| ResourceIdentifier {\n target_kind: resource_id.target_kind.clone(),\n key: legacy_state_key,\n }),\n });\n }\n Ok(FlowSetupStatus {\n status: to_object_status(existing_state, desired_state),\n seen_flow_metadata_version: existing_state.and_then(|s| s.seen_flow_metadata_version),\n metadata_change,\n tracking_table: tracking_table_change.map(|c| c.into_setup_info()),\n target_resources,\n unknown_resources,\n })\n}\n\nstruct ResourceSetupChangeItem<'a, K: 'a, C: ResourceSetupStatus> {\n key: &'a K,\n setup_status: &'a C,\n}\n\nasync fn maybe_update_resource_setup<\n 'a,\n K: 'a,\n S: 'a,\n C: ResourceSetupStatus,\n ChangeApplierResultFut: Future>,\n>(\n resource_kind: &str,\n write: &mut (dyn std::io::Write + Send),\n resources: impl Iterator>,\n apply_change: impl FnOnce(Vec>) -> ChangeApplierResultFut,\n) -> Result<()> {\n let mut changes = Vec::new();\n for resource in resources {\n if let Some(setup_status) = &resource.setup_status {\n if setup_status.change_type() != SetupChangeType::NoChange {\n changes.push(ResourceSetupChangeItem {\n key: &resource.key,\n setup_status,\n });\n writeln!(write, \"{}:\", resource.description)?;\n for change in 
setup_status.describe_changes() {\n match change {\n setup::ChangeDescription::Action(action) => {\n writeln!(write, \" - {action}\")?;\n }\n setup::ChangeDescription::Note(_) => {}\n }\n }\n }\n }\n }\n if !changes.is_empty() {\n write!(write, \"Pushing change for {resource_kind}...\")?;\n apply_change(changes).await?;\n writeln!(write, \"DONE\")?;\n }\n Ok(())\n}\n\nasync fn apply_changes_for_flow(\n write: &mut (dyn std::io::Write + Send),\n flow_ctx: &FlowContext,\n flow_status: &FlowSetupStatus,\n existing_setup_state: &mut Option>,\n pool: &PgPool,\n) -> Result<()> {\n let Some(status) = flow_status.status else {\n return Ok(());\n };\n let verb = match status {\n ObjectStatus::New => \"Creating\",\n ObjectStatus::Deleted => \"Deleting\",\n ObjectStatus::Existing => \"Updating resources for \",\n _ => bail!(\"invalid flow status\"),\n };\n write!(write, \"\\n{verb} flow {}:\\n\", flow_ctx.flow_name())?;\n\n let mut update_info =\n HashMap::::new();\n\n if let Some(metadata_change) = &flow_status.metadata_change {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::FlowMetadata.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(metadata_change.desired_state(), None)?,\n );\n }\n if let Some(tracking_table) = &flow_status.tracking_table {\n if tracking_table\n .setup_status\n .as_ref()\n .map(|c| c.change_type() != SetupChangeType::NoChange)\n .unwrap_or_default()\n {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::TrackingTable.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(tracking_table.state.as_ref(), None)?,\n );\n }\n }\n\n for target_resource in &flow_status.target_resources {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(target_resource.key.target_kind.clone()).to_string(),\n target_resource.key.key.clone(),\n ),\n db_metadata::StateUpdateInfo::new(\n target_resource.state.as_ref(),\n 
target_resource.legacy_key.as_ref().map(|k| {\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(k.target_kind.clone()).to_string(),\n k.key.clone(),\n )\n }),\n )?,\n );\n }\n\n let new_version_id = db_metadata::stage_changes_for_flow(\n flow_ctx.flow_name(),\n flow_status.seen_flow_metadata_version,\n &update_info,\n pool,\n )\n .await?;\n\n if let Some(tracking_table) = &flow_status.tracking_table {\n maybe_update_resource_setup(\n \"tracking table\",\n write,\n std::iter::once(tracking_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n }\n\n let mut setup_status_by_target_kind = IndexMap::<&str, Vec<_>>::new();\n for target_resource in &flow_status.target_resources {\n setup_status_by_target_kind\n .entry(target_resource.key.target_kind.as_str())\n .or_default()\n .push(target_resource);\n }\n for (target_kind, resources) in setup_status_by_target_kind.into_iter() {\n maybe_update_resource_setup(\n target_kind,\n write,\n resources.into_iter(),\n |setup_status| async move {\n let factory = get_export_target_factory(target_kind).ok_or_else(|| {\n anyhow::anyhow!(\"No factory found for target kind: {}\", target_kind)\n })?;\n factory\n .apply_setup_changes(\n setup_status\n .into_iter()\n .map(|s| interface::ResourceSetupChangeItem {\n key: &s.key.key,\n setup_status: s.setup_status.as_ref(),\n })\n .collect(),\n flow_ctx.flow.flow_instance_ctx.clone(),\n )\n .await?;\n Ok(())\n },\n )\n .await?;\n }\n\n let is_deletion = status == ObjectStatus::Deleted;\n db_metadata::commit_changes_for_flow(\n flow_ctx.flow_name(),\n new_version_id,\n &update_info,\n is_deletion,\n pool,\n )\n .await?;\n if is_deletion {\n *existing_setup_state = None;\n } else {\n let (existing_metadata, existing_tracking_table, existing_targets) =\n match std::mem::take(existing_setup_state) {\n Some(s) => (Some(s.metadata), Some(s.tracking_table), s.targets),\n None => Default::default(),\n };\n let metadata = CombinedState::from_change(\n 
existing_metadata,\n flow_status\n .metadata_change\n .as_ref()\n .map(|v| v.desired_state()),\n );\n let tracking_table = CombinedState::from_change(\n existing_tracking_table,\n flow_status.tracking_table.as_ref().map(|c| {\n c.setup_status\n .as_ref()\n .and_then(|c| c.desired_state.as_ref())\n }),\n );\n let mut targets = existing_targets;\n for target_resource in &flow_status.target_resources {\n match &target_resource.state {\n Some(state) => {\n targets.insert(\n target_resource.key.clone(),\n CombinedState::from_desired(state.clone()),\n );\n }\n None => {\n targets.shift_remove(&target_resource.key);\n }\n }\n }\n *existing_setup_state = Some(setup::FlowSetupState {\n metadata,\n tracking_table,\n seen_flow_metadata_version: Some(new_version_id),\n targets,\n });\n }\n\n writeln!(write, \"Done for flow {}\", flow_ctx.flow_name())?;\n Ok(())\n}\n\nasync fn apply_global_changes(\n write: &mut (dyn std::io::Write + Send),\n setup_status: &GlobalSetupStatus,\n all_setup_states: &mut AllSetupStates,\n) -> Result<()> {\n maybe_update_resource_setup(\n \"metadata table\",\n write,\n std::iter::once(&setup_status.metadata_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n\n if setup_status\n .metadata_table\n .setup_status\n .as_ref()\n .is_some_and(|c| c.change_type() == SetupChangeType::Create)\n {\n all_setup_states.has_metadata_table = true;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum FlowSetupChangeAction {\n Setup,\n Drop,\n}\npub struct SetupChangeBundle {\n pub action: FlowSetupChangeAction,\n pub flow_names: Vec,\n}\n\nimpl SetupChangeBundle {\n async fn get_flow_setup_status<'a>(\n setup_ctx: &LibSetupContext,\n flow_ctx: &'a FlowContext,\n flow_exec_ctx: &'a FlowExecutionContext,\n action: &FlowSetupChangeAction,\n buffer: &'a mut Option,\n ) -> Result<&'a FlowSetupStatus> {\n let result = match action {\n FlowSetupChangeAction::Setup => &flow_exec_ctx.setup_status,\n 
FlowSetupChangeAction::Drop => {\n let existing_state = setup_ctx.all_setup_states.flows.get(flow_ctx.flow_name());\n buffer.insert(\n check_flow_setup_status(None, existing_state, &flow_ctx.flow.flow_instance_ctx)\n .await?,\n )\n }\n };\n Ok(result)\n }\n\n pub async fn describe(&self, lib_context: &LibContext) -> Result<(String, bool)> {\n let mut text = String::new();\n let mut is_up_to_date = true;\n\n let setup_ctx = lib_context\n .require_persistence_ctx()?\n .setup_ctx\n .read()\n .await;\n let setup_ctx = &*setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup {\n is_up_to_date = is_up_to_date && setup_ctx.global_setup_status.is_up_to_date();\n write!(&mut text, \"{}\", setup_ctx.global_setup_status)?;\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().read().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n\n is_up_to_date = is_up_to_date && setup_status.is_up_to_date();\n write!(\n &mut text,\n \"{}\",\n setup::FormattedFlowSetupStatus(flow_name, setup_status)\n )?;\n }\n Ok((text, is_up_to_date))\n }\n\n pub async fn apply(\n &self,\n lib_context: &LibContext,\n write: &mut (dyn std::io::Write + Send),\n ) -> Result<()> {\n let persistence_ctx = lib_context.require_persistence_ctx()?;\n let mut setup_ctx = persistence_ctx.setup_ctx.write().await;\n let setup_ctx = &mut *setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup\n && !setup_ctx.global_setup_status.is_up_to_date()\n {\n apply_global_changes(\n write,\n &setup_ctx.global_setup_status,\n &mut setup_ctx.all_setup_states,\n )\n .await?;\n setup_ctx.global_setup_status =\n 
GlobalSetupStatus::from_setup_states(&setup_ctx.all_setup_states);\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let mut flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().write().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n if setup_status.is_up_to_date() {\n continue;\n }\n\n let mut flow_states = setup_ctx.all_setup_states.flows.remove(flow_name);\n apply_changes_for_flow(\n write,\n &flow_ctx,\n setup_status,\n &mut flow_states,\n &persistence_ctx.builtin_db_pool,\n )\n .await?;\n\n flow_exec_ctx\n .update_setup_state(&flow_ctx.flow, flow_states.as_ref())\n .await?;\n if let Some(flow_states) = flow_states {\n setup_ctx\n .all_setup_states\n .flows\n .insert(flow_name.to_string(), flow_states);\n }\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/extract_by_llm.rs", "use crate::llm::{\n LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat, new_llm_generation_client,\n};\nuse crate::ops::sdk::*;\nuse crate::prelude::*;\nuse base::json_schema::build_json_schema;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Spec {\n llm_spec: LlmSpec,\n output_type: EnrichedValueType,\n instruction: Option,\n}\n\npub struct Args {\n text: Option,\n image: Option,\n}\n\nstruct Executor {\n args: Args,\n client: Box,\n model: String,\n output_json_schema: SchemaObject,\n system_prompt: String,\n value_extractor: base::json_schema::ValueExtractor,\n}\n\nfn get_system_prompt(instructions: &Option, extra_instructions: Option) -> String {\n let mut message =\n \"You are a helpful assistant that processes user-provided inputs (text, images, or both) to 
produce structured outputs. \\\nYour task is to follow the provided instructions to generate or extract information and output valid JSON matching the specified schema. \\\nBase your response solely on the content of the input. \\\nFor generative tasks, respond accurately and relevantly based on what is provided. \\\nUnless explicitly instructed otherwise, output only the JSON. DO NOT include explanations, descriptions, or formatting outside the JSON.\"\n .to_string();\n\n if let Some(custom_instructions) = instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(custom_instructions);\n }\n\n if let Some(extra_instructions) = extra_instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(&extra_instructions);\n }\n\n message\n}\n\nimpl Executor {\n async fn new(spec: Spec, args: Args) -> Result {\n let client = new_llm_generation_client(\n spec.llm_spec.api_type,\n spec.llm_spec.address,\n spec.llm_spec.api_config,\n )\n .await?;\n let schema_output = build_json_schema(spec.output_type, client.json_schema_options())?;\n Ok(Self {\n args,\n client,\n model: spec.llm_spec.model,\n output_json_schema: schema_output.schema,\n system_prompt: get_system_prompt(&spec.instruction, schema_output.extra_instructions),\n value_extractor: schema_output.value_extractor,\n })\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let image_bytes: Option> = self\n .args\n .image\n .as_ref()\n .map(|arg| arg.value(&input)?.as_bytes())\n .transpose()?\n .map(|bytes| Cow::Borrowed(bytes.as_ref()));\n let text = self\n .args\n .text\n .as_ref()\n .map(|arg| arg.value(&input)?.as_str())\n .transpose()?;\n\n if text.is_none() && image_bytes.is_none() {\n api_bail!(\"At least one of `text` or `image` must be provided\");\n }\n\n let user_prompt = text.map_or(\"\", |v| v);\n let req = 
LlmGenerateRequest {\n model: &self.model,\n system_prompt: Some(Cow::Borrowed(&self.system_prompt)),\n user_prompt: Cow::Borrowed(user_prompt),\n image: image_bytes,\n output_format: Some(OutputFormat::JsonSchema {\n name: Cow::Borrowed(\"ExtractedData\"),\n schema: Cow::Borrowed(&self.output_json_schema),\n }),\n };\n let res = self.client.generate(req).await?;\n let json_value: serde_json::Value = serde_json::from_str(res.text.as_str())?;\n let value = self.value_extractor.extract_value(json_value)?;\n Ok(value)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ExtractByLlm\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_optional_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n image: args_resolver\n .next_optional_arg(\"image\")?\n .expect_type(&ValueType::Basic(BasicValueType::Bytes))?,\n };\n\n if args.text.is_none() && args.image.is_none() {\n api_bail!(\"At least one of 'text' or 'image' must be provided\");\n }\n\n Ok((args, spec.output_type.clone()))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n resolved_input_schema: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec, resolved_input_schema).await?))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires an OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_extract_by_llm() {\n // Define the expected output structure\n let target_output_schema = StructSchema {\n fields: Arc::new(vec![\n FieldSchema::new(\n \"extracted_field_name\",\n 
make_output_type(BasicValueType::Str),\n ),\n FieldSchema::new(\n \"extracted_field_value\",\n make_output_type(BasicValueType::Int64),\n ),\n ]),\n description: Some(\"A test structure for extraction\".into()),\n };\n\n let output_type_spec = EnrichedValueType {\n typ: ValueType::Struct(target_output_schema.clone()),\n nullable: false,\n attrs: Arc::new(BTreeMap::new()),\n };\n\n let spec = Spec {\n llm_spec: LlmSpec {\n api_type: crate::llm::LlmApiType::OpenAi,\n model: \"gpt-4o\".to_string(),\n address: None,\n api_config: None,\n },\n output_type: output_type_spec,\n instruction: Some(\"Extract the name and value from the text. The name is a string, the value is an integer.\".to_string()),\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"The item is called 'CocoIndex Test' and its value is 42.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_extract_by_llm: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Struct(field_values) => {\n assert_eq!(\n field_values.fields.len(),\n target_output_schema.fields.len(),\n \"Mismatched number of fields in output struct\"\n );\n for (idx, field_schema) in target_output_schema.fields.iter().enumerate() {\n match (&field_values.fields[idx], &field_schema.value_type.typ) {\n (\n Value::Basic(BasicValue::Str(_)),\n ValueType::Basic(BasicValueType::Str),\n ) => {}\n (\n Value::Basic(BasicValue::Int64(_)),\n ValueType::Basic(BasicValueType::Int64),\n ) => {}\n (val, expected_type) => panic!(\n \"Field '{}' type mismatch. Got {:?}, expected type compatible with {:?}\",\n field_schema.name,\n val.kind(),\n expected_type\n ),\n }\n }\n }\n _ => panic!(\"Expected Value::Struct, got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/ops/functions/split_recursively.rs", "use anyhow::anyhow;\nuse log::{error, trace};\nuse regex::{Matches, Regex};\nuse std::collections::HashSet;\nuse std::sync::LazyLock;\nuse std::{collections::HashMap, sync::Arc};\nuse unicase::UniCase;\n\nuse crate::base::field_attrs;\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Deserialize)]\nstruct CustomLanguageSpec {\n language_name: String,\n #[serde(default)]\n aliases: Vec,\n separators_regex: Vec,\n}\n\n#[derive(Deserialize)]\nstruct Spec {\n #[serde(default)]\n custom_languages: Vec,\n}\n\nconst SYNTAX_LEVEL_GAP_COST: usize = 512;\nconst MISSING_OVERLAP_COST: usize = 512;\nconst PER_LINE_BREAK_LEVEL_GAP_COST: usize = 64;\nconst TOO_SMALL_CHUNK_COST: usize = 1048576;\n\npub struct Args {\n text: ResolvedOpArg,\n chunk_size: ResolvedOpArg,\n min_chunk_size: Option,\n chunk_overlap: Option,\n language: Option,\n}\n\nstruct SimpleLanguageConfig {\n name: String,\n aliases: Vec,\n separator_regex: Vec,\n}\n\nstatic DEFAULT_LANGUAGE_CONFIG: LazyLock =\n LazyLock::new(|| SimpleLanguageConfig {\n name: 
\"_DEFAULT\".to_string(),\n aliases: vec![],\n separator_regex: [r\"\\n\\n+\", r\"\\n\", r\"\\s+\"]\n .into_iter()\n .map(|s| Regex::new(s).unwrap())\n .collect(),\n });\n\nstruct TreesitterLanguageConfig {\n name: String,\n tree_sitter_lang: tree_sitter::Language,\n terminal_node_kind_ids: HashSet,\n}\n\nfn add_treesitter_language<'a>(\n output: &'a mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n lang_fn: impl Into,\n terminal_node_kinds: impl IntoIterator,\n) {\n let tree_sitter_lang: tree_sitter::Language = lang_fn.into();\n let terminal_node_kind_ids = terminal_node_kinds\n .into_iter()\n .filter_map(|kind| {\n let id = tree_sitter_lang.id_for_node_kind(kind, true);\n if id != 0 {\n trace!(\"Got id for node kind: `{kind}` -> {id}\");\n Some(id)\n } else {\n error!(\"Failed in getting id for node kind: `{kind}`\");\n None\n }\n })\n .collect();\n\n let config = Arc::new(TreesitterLanguageConfig {\n name: name.to_string(),\n tree_sitter_lang,\n terminal_node_kind_ids,\n });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nstatic TREE_SITTER_LANGUAGE_BY_LANG: LazyLock<\n HashMap, Arc>,\n> = LazyLock::new(|| {\n let mut map = HashMap::new();\n add_treesitter_language(&mut map, \"C\", [\".c\"], tree_sitter_c::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"C++\",\n [\".cpp\", \".cc\", \".cxx\", \".h\", \".hpp\", \"cpp\"],\n tree_sitter_c::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"C#\",\n [\".cs\", \"cs\", \"csharp\"],\n tree_sitter_c_sharp::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"CSS\",\n [\".css\", \".scss\"],\n tree_sitter_css::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Fortran\",\n [\".f\", \".f90\", \".f95\", \".f03\", \"f\", \"f90\", \"f95\", \"f03\"],\n tree_sitter_fortran::LANGUAGE,\n [],\n );\n add_treesitter_language(\n 
&mut map,\n \"Go\",\n [\".go\", \"golang\"],\n tree_sitter_go::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"HTML\",\n [\".html\", \".htm\"],\n tree_sitter_html::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"Java\", [\".java\"], tree_sitter_java::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"JavaScript\",\n [\".js\", \"js\"],\n tree_sitter_javascript::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"JSON\", [\".json\"], tree_sitter_json::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Kotlin\",\n [\".kt\", \".kts\"],\n tree_sitter_kotlin_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Markdown\",\n [\".md\", \".mdx\", \"md\"],\n tree_sitter_md::LANGUAGE,\n [\"inline\"],\n );\n add_treesitter_language(\n &mut map,\n \"Pascal\",\n [\".pas\", \"pas\", \".dpr\", \"dpr\", \"Delphi\"],\n tree_sitter_pascal::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"PHP\", [\".php\"], tree_sitter_php::LANGUAGE_PHP, []);\n add_treesitter_language(\n &mut map,\n \"Python\",\n [\".py\"],\n tree_sitter_python::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"R\", [\".r\"], tree_sitter_r::LANGUAGE, []);\n add_treesitter_language(&mut map, \"Ruby\", [\".rb\"], tree_sitter_ruby::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Rust\",\n [\".rs\", \"rs\"],\n tree_sitter_rust::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Scala\",\n [\".scala\"],\n tree_sitter_scala::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"SQL\", [\".sql\"], tree_sitter_sequel::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Swift\",\n [\".swift\"],\n tree_sitter_swift::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TOML\",\n [\".toml\"],\n tree_sitter_toml_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TSX\",\n [\".tsx\"],\n tree_sitter_typescript::LANGUAGE_TSX,\n [],\n );\n add_treesitter_language(\n &mut map,\n 
\"TypeScript\",\n [\".ts\", \"ts\"],\n tree_sitter_typescript::LANGUAGE_TYPESCRIPT,\n [],\n );\n add_treesitter_language(&mut map, \"XML\", [\".xml\"], tree_sitter_xml::LANGUAGE_XML, []);\n add_treesitter_language(&mut map, \"DTD\", [\".dtd\"], tree_sitter_xml::LANGUAGE_DTD, []);\n add_treesitter_language(\n &mut map,\n \"YAML\",\n [\".yaml\", \".yml\"],\n tree_sitter_yaml::LANGUAGE,\n [],\n );\n map\n});\n\nenum ChunkKind<'t> {\n TreeSitterNode {\n lang_config: &'t TreesitterLanguageConfig,\n node: tree_sitter::Node<'t>,\n },\n RegexpSepChunk {\n lang_config: &'t SimpleLanguageConfig,\n next_regexp_sep_id: usize,\n },\n}\n\nstruct Chunk<'t, 's: 't> {\n full_text: &'s str,\n range: RangeValue,\n kind: ChunkKind<'t>,\n}\n\nimpl<'t, 's: 't> Chunk<'t, 's> {\n fn text(&self) -> &'s str {\n self.range.extract_str(self.full_text)\n }\n}\n\nstruct TextChunksIter<'t, 's: 't> {\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n matches_iter: Matches<'t, 's>,\n regexp_sep_id: usize,\n next_start_pos: Option,\n}\n\nimpl<'t, 's: 't> TextChunksIter<'t, 's> {\n fn new(\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n regexp_sep_id: usize,\n ) -> Self {\n Self {\n lang_config,\n parent,\n matches_iter: lang_config.separator_regex[regexp_sep_id].find_iter(parent.text()),\n regexp_sep_id,\n next_start_pos: Some(parent.range.start),\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TextChunksIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let start_pos = self.next_start_pos?;\n let end_pos = match self.matches_iter.next() {\n Some(grp) => {\n self.next_start_pos = Some(self.parent.range.start + grp.end());\n self.parent.range.start + grp.start()\n }\n None => {\n self.next_start_pos = None;\n if start_pos >= self.parent.range.end {\n return None;\n }\n self.parent.range.end\n }\n };\n Some(Chunk {\n full_text: self.parent.full_text,\n range: RangeValue::new(start_pos, end_pos),\n kind: ChunkKind::RegexpSepChunk 
{\n lang_config: self.lang_config,\n next_regexp_sep_id: self.regexp_sep_id + 1,\n },\n })\n }\n}\n\nstruct TreeSitterNodeIter<'t, 's: 't> {\n lang_config: &'t TreesitterLanguageConfig,\n full_text: &'s str,\n cursor: Option>,\n next_start_pos: usize,\n end_pos: usize,\n}\n\nimpl<'t, 's: 't> TreeSitterNodeIter<'t, 's> {\n fn fill_gap(\n next_start_pos: &mut usize,\n gap_end_pos: usize,\n full_text: &'s str,\n ) -> Option> {\n let start_pos = *next_start_pos;\n if start_pos < gap_end_pos {\n *next_start_pos = gap_end_pos;\n Some(Chunk {\n full_text,\n range: RangeValue::new(start_pos, gap_end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n })\n } else {\n None\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TreeSitterNodeIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let cursor = if let Some(cursor) = &mut self.cursor {\n cursor\n } else {\n return Self::fill_gap(&mut self.next_start_pos, self.end_pos, self.full_text);\n };\n let node = cursor.node();\n if let Some(gap) =\n Self::fill_gap(&mut self.next_start_pos, node.start_byte(), self.full_text)\n {\n return Some(gap);\n }\n if !cursor.goto_next_sibling() {\n self.cursor = None;\n }\n self.next_start_pos = node.end_byte();\n Some(Chunk {\n full_text: self.full_text,\n range: RangeValue::new(node.start_byte(), node.end_byte()),\n kind: ChunkKind::TreeSitterNode {\n lang_config: self.lang_config,\n node,\n },\n })\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]\nenum LineBreakLevel {\n Inline,\n Newline,\n DoubleNewline,\n}\n\nimpl LineBreakLevel {\n fn ord(self) -> usize {\n match self {\n LineBreakLevel::Inline => 0,\n LineBreakLevel::Newline => 1,\n LineBreakLevel::DoubleNewline => 2,\n }\n }\n}\n\nfn line_break_level(c: &str) -> LineBreakLevel {\n let mut lb_level = LineBreakLevel::Inline;\n let mut iter = c.chars();\n while let Some(c) = iter.next() {\n if c == '\\n' || c == '\\r' {\n 
lb_level = LineBreakLevel::Newline;\n for c2 in iter.by_ref() {\n if c2 == '\\n' || c2 == '\\r' {\n if c == c2 {\n return LineBreakLevel::DoubleNewline;\n }\n } else {\n break;\n }\n }\n }\n }\n lb_level\n}\n\nconst INLINE_SPACE_CHARS: [char; 2] = [' ', '\\t'];\n\nstruct AtomChunk {\n range: RangeValue,\n boundary_syntax_level: usize,\n\n internal_lb_level: LineBreakLevel,\n boundary_lb_level: LineBreakLevel,\n}\n\nstruct AtomChunksCollector<'s> {\n full_text: &'s str,\n\n curr_level: usize,\n min_level: usize,\n atom_chunks: Vec,\n}\nimpl<'s> AtomChunksCollector<'s> {\n fn collect(&mut self, range: RangeValue) {\n // Trim trailing whitespaces.\n let end_trimmed_text = &self.full_text[range.start..range.end].trim_end();\n if end_trimmed_text.is_empty() {\n return;\n }\n\n // Trim leading whitespaces.\n let trimmed_text = end_trimmed_text.trim_start();\n let new_start = range.start + (end_trimmed_text.len() - trimmed_text.len());\n let new_end = new_start + trimmed_text.len();\n\n // Align to beginning of the line if possible.\n let prev_end = self.atom_chunks.last().map_or(0, |chunk| chunk.range.end);\n let gap = &self.full_text[prev_end..new_start];\n let boundary_lb_level = line_break_level(gap);\n let range = if boundary_lb_level != LineBreakLevel::Inline {\n let trimmed_gap = gap.trim_end_matches(INLINE_SPACE_CHARS);\n RangeValue::new(prev_end + trimmed_gap.len(), new_end)\n } else {\n RangeValue::new(new_start, new_end)\n };\n\n self.atom_chunks.push(AtomChunk {\n range,\n boundary_syntax_level: self.min_level,\n internal_lb_level: line_break_level(trimmed_text),\n boundary_lb_level,\n });\n self.min_level = self.curr_level;\n }\n\n fn into_atom_chunks(mut self) -> Vec {\n self.atom_chunks.push(AtomChunk {\n range: RangeValue::new(self.full_text.len(), self.full_text.len()),\n boundary_syntax_level: self.min_level,\n internal_lb_level: LineBreakLevel::Inline,\n boundary_lb_level: LineBreakLevel::DoubleNewline,\n });\n self.atom_chunks\n }\n}\n\n#[derive(Debug, 
Clone, PartialEq, Eq)]\nstruct OutputPosition {\n char_offset: usize,\n line: u32,\n column: u32,\n}\n\nimpl OutputPosition {\n fn into_output(self) -> value::Value {\n value::Value::Struct(fields_value!(\n self.char_offset as i64,\n self.line as i64,\n self.column as i64\n ))\n }\n}\nstruct Position {\n byte_offset: usize,\n output: Option,\n}\n\nimpl Position {\n fn new(byte_offset: usize) -> Self {\n Self {\n byte_offset,\n output: None,\n }\n }\n}\n\nstruct ChunkOutput<'s> {\n start_pos: Position,\n end_pos: Position,\n text: &'s str,\n}\n\nstruct RecursiveChunker<'s> {\n full_text: &'s str,\n chunk_size: usize,\n chunk_overlap: usize,\n min_chunk_size: usize,\n}\n\nimpl<'t, 's: 't> RecursiveChunker<'s> {\n fn collect_atom_chunks_from_iter(\n &self,\n sub_chunks_iter: impl Iterator>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n atom_collector.curr_level += 1;\n for sub_chunk in sub_chunks_iter {\n let range = sub_chunk.range;\n if range.len() <= self.min_chunk_size {\n atom_collector.collect(range);\n } else {\n self.collect_atom_chunks(sub_chunk, atom_collector)?;\n }\n }\n atom_collector.curr_level -= 1;\n if atom_collector.curr_level < atom_collector.min_level {\n atom_collector.min_level = atom_collector.curr_level;\n }\n Ok(())\n }\n\n fn collect_atom_chunks(\n &self,\n chunk: Chunk<'t, 's>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n match chunk.kind {\n ChunkKind::TreeSitterNode { lang_config, node } => {\n if !lang_config.terminal_node_kind_ids.contains(&node.kind_id()) {\n let mut cursor = node.walk();\n if cursor.goto_first_child() {\n return self.collect_atom_chunks_from_iter(\n TreeSitterNodeIter {\n lang_config,\n full_text: self.full_text,\n cursor: Some(cursor),\n next_start_pos: node.start_byte(),\n end_pos: node.end_byte(),\n },\n atom_collector,\n );\n }\n }\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: chunk.range,\n kind: ChunkKind::RegexpSepChunk {\n 
lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n },\n atom_collector,\n )\n }\n ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id,\n } => {\n if next_regexp_sep_id >= lang_config.separator_regex.len() {\n atom_collector.collect(chunk.range);\n Ok(())\n } else {\n self.collect_atom_chunks_from_iter(\n TextChunksIter::new(lang_config, &chunk, next_regexp_sep_id),\n atom_collector,\n )\n }\n }\n }\n }\n\n fn get_overlap_cost_base(&self, offset: usize) -> usize {\n if self.chunk_overlap == 0 {\n 0\n } else {\n (self.full_text.len() - offset) * MISSING_OVERLAP_COST / self.chunk_overlap\n }\n }\n\n fn merge_atom_chunks(&self, atom_chunks: Vec) -> Vec> {\n struct AtomRoutingPlan {\n start_idx: usize, // index of `atom_chunks` for the start chunk\n prev_plan_idx: usize, // index of `plans` for the previous plan\n cost: usize,\n overlap_cost_base: usize,\n }\n type PrevPlanCandidate = (std::cmp::Reverse, usize); // (cost, start_idx)\n\n let mut plans = Vec::with_capacity(atom_chunks.len());\n // Janitor\n plans.push(AtomRoutingPlan {\n start_idx: 0,\n prev_plan_idx: 0,\n cost: 0,\n overlap_cost_base: self.get_overlap_cost_base(0),\n });\n let mut prev_plan_candidates = std::collections::BinaryHeap::::new();\n\n let mut gap_cost_cache = vec![0];\n let mut syntax_level_gap_cost = |boundary: usize, internal: usize| -> usize {\n if boundary > internal {\n let gap = boundary - internal;\n for i in gap_cost_cache.len()..=gap {\n gap_cost_cache.push(gap_cost_cache[i - 1] + SYNTAX_LEVEL_GAP_COST / i);\n }\n gap_cost_cache[gap]\n } else {\n 0\n }\n };\n\n for (i, chunk) in atom_chunks[0..atom_chunks.len() - 1].iter().enumerate() {\n let mut min_cost = usize::MAX;\n let mut arg_min_start_idx: usize = 0;\n let mut arg_min_prev_plan_idx: usize = 0;\n let mut start_idx = i;\n\n let end_syntax_level = atom_chunks[i + 1].boundary_syntax_level;\n let end_lb_level = atom_chunks[i + 1].boundary_lb_level;\n\n let mut internal_syntax_level = usize::MAX;\n 
let mut internal_lb_level = LineBreakLevel::Inline;\n\n fn lb_level_gap(boundary: LineBreakLevel, internal: LineBreakLevel) -> usize {\n if boundary.ord() < internal.ord() {\n internal.ord() - boundary.ord()\n } else {\n 0\n }\n }\n loop {\n let start_chunk = &atom_chunks[start_idx];\n let chunk_size = chunk.range.end - start_chunk.range.start;\n\n let mut cost = 0;\n cost +=\n syntax_level_gap_cost(start_chunk.boundary_syntax_level, internal_syntax_level);\n cost += syntax_level_gap_cost(end_syntax_level, internal_syntax_level);\n cost += (lb_level_gap(start_chunk.boundary_lb_level, internal_lb_level)\n + lb_level_gap(end_lb_level, internal_lb_level))\n * PER_LINE_BREAK_LEVEL_GAP_COST;\n if chunk_size < self.min_chunk_size {\n cost += TOO_SMALL_CHUNK_COST;\n }\n\n if chunk_size > self.chunk_size {\n if min_cost == usize::MAX {\n min_cost = cost + plans[start_idx].cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = start_idx;\n }\n break;\n }\n\n let prev_plan_idx = if self.chunk_overlap > 0 {\n while let Some(top_prev_plan) = prev_plan_candidates.peek() {\n let overlap_size =\n atom_chunks[top_prev_plan.1].range.end - start_chunk.range.start;\n if overlap_size <= self.chunk_overlap {\n break;\n }\n prev_plan_candidates.pop();\n }\n prev_plan_candidates.push((\n std::cmp::Reverse(\n plans[start_idx].cost + plans[start_idx].overlap_cost_base,\n ),\n start_idx,\n ));\n prev_plan_candidates.peek().unwrap().1\n } else {\n start_idx\n };\n let prev_plan = &plans[prev_plan_idx];\n cost += prev_plan.cost;\n if self.chunk_overlap == 0 {\n cost += MISSING_OVERLAP_COST / 2;\n } else {\n let start_cost_base = self.get_overlap_cost_base(start_chunk.range.start);\n cost += if prev_plan.overlap_cost_base < start_cost_base {\n MISSING_OVERLAP_COST + prev_plan.overlap_cost_base - start_cost_base\n } else {\n MISSING_OVERLAP_COST\n };\n }\n if cost < min_cost {\n min_cost = cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = prev_plan_idx;\n }\n\n if 
start_idx == 0 {\n break;\n }\n\n start_idx -= 1;\n internal_syntax_level =\n internal_syntax_level.min(start_chunk.boundary_syntax_level);\n internal_lb_level = internal_lb_level.max(start_chunk.internal_lb_level);\n }\n plans.push(AtomRoutingPlan {\n start_idx: arg_min_start_idx,\n prev_plan_idx: arg_min_prev_plan_idx,\n cost: min_cost,\n overlap_cost_base: self.get_overlap_cost_base(chunk.range.end),\n });\n prev_plan_candidates.clear();\n }\n\n let mut output = Vec::new();\n let mut plan_idx = plans.len() - 1;\n while plan_idx > 0 {\n let plan = &plans[plan_idx];\n let start_chunk = &atom_chunks[plan.start_idx];\n let end_chunk = &atom_chunks[plan_idx - 1];\n output.push(ChunkOutput {\n start_pos: Position::new(start_chunk.range.start),\n end_pos: Position::new(end_chunk.range.end),\n text: &self.full_text[start_chunk.range.start..end_chunk.range.end],\n });\n plan_idx = plan.prev_plan_idx;\n }\n output.reverse();\n output\n }\n\n fn split_root_chunk(&self, kind: ChunkKind<'t>) -> Result>> {\n let mut atom_collector = AtomChunksCollector {\n full_text: self.full_text,\n min_level: 0,\n curr_level: 0,\n atom_chunks: Vec::new(),\n };\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: RangeValue::new(0, self.full_text.len()),\n kind,\n },\n &mut atom_collector,\n )?;\n let atom_chunks = atom_collector.into_atom_chunks();\n let output = self.merge_atom_chunks(atom_chunks);\n Ok(output)\n }\n}\n\nstruct Executor {\n args: Args,\n custom_languages: HashMap, Arc>,\n}\n\nimpl Executor {\n fn new(args: Args, spec: Spec) -> Result {\n let mut custom_languages = HashMap::new();\n for lang in spec.custom_languages {\n let separator_regex = lang\n .separators_regex\n .iter()\n .map(|s| Regex::new(s))\n .collect::>()\n .with_context(|| {\n format!(\n \"failed in parsing regexp for language `{}`\",\n lang.language_name\n )\n })?;\n let language_config = Arc::new(SimpleLanguageConfig {\n name: lang.language_name,\n aliases: lang.aliases,\n 
separator_regex,\n });\n if custom_languages\n .insert(\n UniCase::new(language_config.name.clone()),\n language_config.clone(),\n )\n .is_some()\n {\n api_bail!(\n \"duplicate language name / alias: `{}`\",\n language_config.name\n );\n }\n for alias in &language_config.aliases {\n if custom_languages\n .insert(UniCase::new(alias.clone()), language_config.clone())\n .is_some()\n {\n api_bail!(\"duplicate language name / alias: `{}`\", alias);\n }\n }\n }\n Ok(Self {\n args,\n custom_languages,\n })\n }\n}\n\nfn set_output_positions<'a>(text: &str, positions: impl Iterator) {\n let mut positions = positions.collect::>();\n positions.sort_by_key(|o| o.byte_offset);\n\n let mut positions_iter = positions.iter_mut();\n let Some(mut next_position) = positions_iter.next() else {\n return;\n };\n\n let mut char_offset = 0;\n let mut line = 1;\n let mut column = 1;\n for (byte_offset, ch) in text.char_indices() {\n while next_position.byte_offset == byte_offset {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n char_offset += 1;\n if ch == '\\n' {\n line += 1;\n column = 1;\n } else {\n column += 1;\n }\n }\n\n // Offsets after the last char.\n loop {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let full_text = self.args.text.value(&input)?.as_str()?;\n let chunk_size = self.args.chunk_size.value(&input)?.as_int64()?;\n let recursive_chunker = RecursiveChunker {\n full_text,\n chunk_size: chunk_size as usize,\n chunk_overlap: (self.args.chunk_overlap.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(0) as usize,\n min_chunk_size: 
(self.args.min_chunk_size.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(chunk_size / 2) as usize,\n };\n\n let language = UniCase::new(\n (if let Some(language) = self.args.language.value(&input)?.optional() {\n language.as_str()?\n } else {\n \"\"\n })\n .to_string(),\n );\n let mut output = if let Some(lang_config) = self.custom_languages.get(&language) {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id: 0,\n })?\n } else if let Some(lang_config) = TREE_SITTER_LANGUAGE_BY_LANG.get(&language) {\n let mut parser = tree_sitter::Parser::new();\n parser.set_language(&lang_config.tree_sitter_lang)?;\n let tree = parser.parse(full_text.as_ref(), None).ok_or_else(|| {\n anyhow!(\"failed in parsing text in language: {}\", lang_config.name)\n })?;\n recursive_chunker.split_root_chunk(ChunkKind::TreeSitterNode {\n lang_config,\n node: tree.root_node(),\n })?\n } else {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n })?\n };\n\n set_output_positions(\n full_text,\n output.iter_mut().flat_map(|chunk_output| {\n std::iter::once(&mut chunk_output.start_pos)\n .chain(std::iter::once(&mut chunk_output.end_pos))\n }),\n );\n\n let table = output\n .into_iter()\n .map(|chunk_output| {\n let output_start = chunk_output.start_pos.output.unwrap();\n let output_end = chunk_output.end_pos.output.unwrap();\n (\n RangeValue::new(output_start.char_offset, output_end.char_offset).into(),\n fields_value!(\n Arc::::from(chunk_output.text),\n output_start.into_output(),\n output_end.into_output()\n )\n .into(),\n )\n })\n .collect();\n\n Ok(Value::KTable(table))\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"SplitRecursively\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a Spec,\n 
args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n chunk_size: args_resolver\n .next_arg(\"chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n min_chunk_size: args_resolver\n .next_optional_arg(\"min_chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n chunk_overlap: args_resolver\n .next_optional_arg(\"chunk_overlap\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let pos_struct = schema::ValueType::Struct(schema::StructSchema {\n fields: Arc::new(vec![\n schema::FieldSchema::new(\"offset\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"line\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"column\", make_output_type(BasicValueType::Int64)),\n ]),\n description: None,\n });\n\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"location\",\n make_output_type(BasicValueType::Range),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"text\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"start\",\n schema::EnrichedValueType {\n typ: pos_struct.clone(),\n nullable: false,\n attrs: Default::default(),\n },\n ));\n schema_builder.add_field(FieldSchema::new(\n \"end\",\n schema::EnrichedValueType {\n typ: pos_struct,\n nullable: false,\n attrs: Default::default(),\n },\n ));\n let output_schema = make_output_type(TableSchema::new(TableKind::KTable, struct_schema))\n .with_attr(\n field_attrs::CHUNK_BASE_TEXT,\n serde_json::to_value(args_resolver.get_analyze_value(&args.text))?,\n 
);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(args, spec)?))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n // Helper function to assert chunk text and its consistency with the range within the original text.\n fn assert_chunk_text_consistency(\n full_text: &str, // Added full text\n actual_chunk: &ChunkOutput<'_>,\n expected_text: &str,\n context: &str,\n ) {\n // Extract text using the chunk's range from the original full text.\n let extracted_text = full_text\n .get(actual_chunk.start_pos.byte_offset..actual_chunk.end_pos.byte_offset)\n .unwrap();\n // Assert that the expected text matches the text provided in the chunk.\n assert_eq!(\n actual_chunk.text, expected_text,\n \"Provided chunk text mismatch - {context}\"\n );\n // Assert that the expected text also matches the text extracted using the chunk's range.\n assert_eq!(\n extracted_text, expected_text,\n \"Range inconsistency: extracted text mismatch - {context}\"\n );\n }\n\n // Creates a default RecursiveChunker for testing, assuming no language-specific parsing.\n fn create_test_chunker(\n text: &str,\n chunk_size: usize,\n min_chunk_size: usize,\n chunk_overlap: usize,\n ) -> RecursiveChunker {\n RecursiveChunker {\n full_text: text,\n chunk_size,\n chunk_overlap,\n min_chunk_size,\n }\n }\n\n #[tokio::test]\n async fn test_split_recursively() {\n let spec = Spec {\n custom_languages: vec![],\n };\n let factory = Arc::new(Factory);\n let text_content = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n\n let input_args_values = vec![\n text_content.to_string().into(),\n (15i64).into(),\n (5i64).into(),\n (0i64).into(),\n Value::Null,\n ];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", 
BasicValueType::Str),\n build_arg_schema(\"chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"min_chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"chunk_overlap\", BasicValueType::Int64),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::KTable(table) => {\n let expected_chunks = vec![\n (RangeValue::new(0, 8), \"Linea 1.\"),\n (RangeValue::new(9, 17), \"Linea 2.\"),\n (RangeValue::new(19, 27), \"Linea 3.\"),\n ];\n\n for (range, expected_text) in expected_chunks {\n let key: KeyValue = range.into();\n match table.get(&key) {\n Some(scope_value_ref) => {\n let chunk_text =\n scope_value_ref.0.fields[0].as_str().unwrap_or_else(|_| {\n panic!(\"Chunk text not a string for key {key:?}\")\n });\n assert_eq!(**chunk_text, *expected_text);\n }\n None => panic!(\"Expected row value for key {key:?}, not found\"),\n }\n }\n }\n other => panic!(\"Expected Value::KTable, got {other:?}\"),\n }\n }\n\n #[test]\n fn test_translate_bytes_to_chars_simple() {\n let text = \"abc😄def\";\n let mut start1 = Position::new(0);\n let mut end1 = Position::new(3);\n let mut start2 = Position::new(3);\n let mut end2 = Position::new(7);\n let mut start3 = Position::new(7);\n let mut end3 = Position::new(10);\n let mut end_full = Position::new(text.len());\n\n let offsets = vec![\n &mut start1,\n &mut end1,\n &mut start2,\n &mut end2,\n &mut start3,\n &mut end3,\n &mut end_full,\n ];\n\n set_output_positions(text, offsets.into_iter());\n\n assert_eq!(\n start1.output,\n Some(OutputPosition {\n char_offset: 0,\n line: 1,\n column: 1,\n })\n );\n assert_eq!(\n end1.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n start2.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 
1,\n column: 4,\n })\n );\n assert_eq!(\n end2.output,\n Some(OutputPosition {\n char_offset: 4,\n line: 1,\n column: 5,\n })\n );\n assert_eq!(\n end3.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n assert_eq!(\n end_full.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n }\n\n #[test]\n fn test_basic_split_no_overlap() {\n let text = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n let chunker = create_test_chunker(text, 15, 5, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n assert_chunk_text_consistency(text, &chunks[0], \"Linea 1.\", \"Test 1, Chunk 0\");\n assert_chunk_text_consistency(text, &chunks[1], \"Linea 2.\", \"Test 1, Chunk 1\");\n assert_chunk_text_consistency(text, &chunks[2], \"Linea 3.\", \"Test 1, Chunk 2\");\n\n // Test splitting when chunk_size forces breaks within segments.\n let text2 = \"A very very long text that needs to be split.\";\n let chunker2 = create_test_chunker(text2, 20, 12, 0);\n let result2 = chunker2.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result2.is_ok());\n let chunks2 = result2.unwrap();\n\n // Expect multiple chunks, likely split by spaces due to chunk_size.\n assert!(chunks2.len() > 1);\n assert_chunk_text_consistency(text2, &chunks2[0], \"A very very long\", \"Test 2, Chunk 0\");\n assert!(chunks2[0].text.len() <= 20);\n }\n\n #[test]\n fn test_basic_split_with_overlap() {\n let text = \"This is a test text that is a bit longer to see how the overlap works.\";\n let chunker = create_test_chunker(text, 20, 10, 5);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n 
assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert!(chunks.len() > 1);\n\n if chunks.len() >= 2 {\n assert!(chunks[0].text.len() <= 25);\n }\n }\n\n #[test]\n fn test_split_trims_whitespace() {\n let text = \" \\n First chunk. \\n\\n Second chunk with spaces at the end. \\n\";\n let chunker = create_test_chunker(text, 30, 10, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n\n assert_chunk_text_consistency(\n text,\n &chunks[0],\n \" First chunk.\",\n \"Whitespace Test, Chunk 0\",\n );\n assert_chunk_text_consistency(\n text,\n &chunks[1],\n \" Second chunk with spaces\",\n \"Whitespace Test, Chunk 1\",\n );\n assert_chunk_text_consistency(text, &chunks[2], \"at the end.\", \"Whitespace Test, Chunk 2\");\n }\n}\n"], ["/cocoindex/src/ops/py_factory.rs", "use crate::prelude::*;\n\nuse pyo3::{\n IntoPyObjectExt, Py, PyAny, Python, pyclass, pymethods,\n types::{IntoPyDict, PyList, PyString, PyTuple},\n};\nuse pythonize::{depythonize, pythonize};\n\nuse crate::{\n base::{schema, value},\n builder::plan,\n ops::sdk::SetupStateCompatibility,\n py::{self, ToResultWithPyTrace},\n};\nuse anyhow::{Result, anyhow};\n\n#[pyclass(name = \"OpArgSchema\")]\npub struct PyOpArgSchema {\n value_type: crate::py::Pythonized,\n analyzed_value: crate::py::Pythonized,\n}\n\n#[pymethods]\nimpl PyOpArgSchema {\n #[getter]\n fn value_type(&self) -> &crate::py::Pythonized {\n &self.value_type\n }\n\n #[getter]\n fn analyzed_value(&self) -> &crate::py::Pythonized {\n &self.analyzed_value\n }\n}\n\nstruct PyFunctionExecutor {\n py_function_executor: Py,\n py_exec_ctx: Arc,\n\n num_positional_args: usize,\n kw_args_names: Vec>,\n result_type: schema::EnrichedValueType,\n\n enable_cache: bool,\n behavior_version: Option,\n}\n\nimpl PyFunctionExecutor {\n fn call_py_fn<'py>(\n &self,\n py: 
Python<'py>,\n input: Vec,\n ) -> Result> {\n let mut args = Vec::with_capacity(self.num_positional_args);\n for v in input[0..self.num_positional_args].iter() {\n args.push(py::value_to_py_object(py, v)?);\n }\n\n let kwargs = if self.kw_args_names.is_empty() {\n None\n } else {\n let mut kwargs = Vec::with_capacity(self.kw_args_names.len());\n for (name, v) in self\n .kw_args_names\n .iter()\n .zip(input[self.num_positional_args..].iter())\n {\n kwargs.push((name.bind(py), py::value_to_py_object(py, v)?));\n }\n Some(kwargs)\n };\n\n let result = self\n .py_function_executor\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n kwargs\n .map(|kwargs| -> Result<_> { Ok(kwargs.into_py_dict(py)?) })\n .transpose()?\n .as_ref(),\n )\n .to_result_with_py_trace(py)?;\n Ok(result.into_bound(py))\n }\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionExecutor for Arc {\n async fn evaluate(&self, input: Vec) -> Result {\n let self = self.clone();\n let result_fut = Python::with_gil(|py| -> Result<_> {\n let result_coro = self.call_py_fn(py, input)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(self.py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro,\n )?)\n })?;\n let result = result_fut.await;\n Python::with_gil(|py| -> Result<_> {\n let result = result.to_result_with_py_trace(py)?;\n Ok(py::value_from_py_object(\n &self.result_type.typ,\n &result.into_bound(py),\n )?)\n })\n }\n\n fn enable_cache(&self) -> bool {\n self.enable_cache\n }\n\n fn behavior_version(&self) -> Option {\n self.behavior_version\n }\n}\n\npub(crate) struct PyFunctionFactory {\n pub py_function_factory: Py,\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionFactory for PyFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n schema::EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let (result_type, executor, kw_args_names, 
num_positional_args) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let mut args = vec![pythonize(py, &spec)?];\n let mut kwargs = vec![];\n let mut num_positional_args = 0;\n for arg in input_schema.into_iter() {\n let py_arg_schema = PyOpArgSchema {\n value_type: crate::py::Pythonized(arg.value_type.clone()),\n analyzed_value: crate::py::Pythonized(arg.analyzed_value.clone()),\n };\n match arg.name.0 {\n Some(name) => {\n kwargs.push((name.clone(), py_arg_schema));\n }\n None => {\n args.push(py_arg_schema.into_bound_py_any(py)?);\n num_positional_args += 1;\n }\n }\n }\n\n let kw_args_names = kwargs\n .iter()\n .map(|(name, _)| PyString::new(py, name).unbind())\n .collect::>();\n let result = self\n .py_function_factory\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n Some(&kwargs.into_py_dict(py)?),\n )\n .to_result_with_py_trace(py)?;\n let (result_type, executor) = result\n .extract::<(crate::py::Pythonized, Py)>(py)?;\n Ok((\n result_type.into_inner(),\n executor,\n kw_args_names,\n num_positional_args,\n ))\n })?;\n\n let executor_fut = {\n let result_type = result_type.clone();\n async move {\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let (prepare_fut, enable_cache, behavior_version) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let prepare_coro = executor\n .call_method(py, \"prepare\", (), None)\n .to_result_with_py_trace(py)?;\n let prepare_fut = pyo3_async_runtimes::into_future_with_locals(\n &pyo3_async_runtimes::TaskLocals::new(\n py_exec_ctx.event_loop.bind(py).clone(),\n ),\n prepare_coro.into_bound(py),\n )?;\n let enable_cache = executor\n .call_method(py, \"enable_cache\", (), None)\n .to_result_with_py_trace(py)?\n .extract::(py)?;\n let behavior_version = executor\n .call_method(py, \"behavior_version\", (), None)\n .to_result_with_py_trace(py)?\n .extract::>(py)?;\n Ok((prepare_fut, enable_cache, behavior_version))\n })?;\n 
prepare_fut.await?;\n Ok(Box::new(Arc::new(PyFunctionExecutor {\n py_function_executor: executor,\n py_exec_ctx,\n num_positional_args,\n kw_args_names,\n result_type,\n enable_cache,\n behavior_version,\n }))\n as Box)\n }\n };\n\n Ok((result_type, executor_fut.boxed()))\n }\n}\n\npub(crate) struct PyExportTargetFactory {\n pub py_target_connector: Py,\n}\n\nstruct PyTargetExecutorContext {\n py_export_ctx: Py,\n py_exec_ctx: Arc,\n}\n\n#[derive(Debug)]\nstruct PyTargetResourceSetupStatus {\n stale_existing_states: IndexSet>,\n desired_state: Option,\n}\n\nimpl setup::ResourceSetupStatus for PyTargetResourceSetupStatus {\n fn describe_changes(&self) -> Vec {\n vec![]\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n if self.stale_existing_states.is_empty() {\n setup::SetupChangeType::NoChange\n } else if self.desired_state.is_some() {\n if self\n .stale_existing_states\n .iter()\n .any(|state| state.is_none())\n {\n setup::SetupChangeType::Create\n } else {\n setup::SetupChangeType::Update\n }\n } else {\n setup::SetupChangeType::Delete\n }\n }\n}\n\n#[async_trait]\nimpl interface::ExportTargetFactory for PyExportTargetFactory {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n if declarations.len() != 0 {\n api_error!(\"Custom target connector doesn't support declarations yet\");\n }\n\n let mut build_outputs = Vec::with_capacity(data_collections.len());\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n for data_collection in data_collections.into_iter() {\n let (py_export_ctx, persistent_key) =\n Python::with_gil(|py| -> Result<(Py, serde_json::Value)> {\n // Deserialize the spec to Python object.\n let py_export_ctx = self\n .py_target_connector\n .call_method(\n py,\n \"create_export_context\",\n (\n &data_collection.name,\n pythonize(py, 
&data_collection.spec)?,\n pythonize(py, &data_collection.key_fields_schema)?,\n pythonize(py, &data_collection.value_fields_schema)?,\n ),\n None,\n )\n .to_result_with_py_trace(py)?;\n\n // Call the `get_persistent_key` method to get the persistent key.\n let persistent_key = self\n .py_target_connector\n .call_method(py, \"get_persistent_key\", (&py_export_ctx,), None)\n .to_result_with_py_trace(py)?;\n let persistent_key = depythonize(&persistent_key.into_bound(py))?;\n Ok((py_export_ctx, persistent_key))\n })?;\n\n let py_exec_ctx = py_exec_ctx.clone();\n let build_output = interface::ExportDataCollectionBuildOutput {\n export_context: Box::pin(async move {\n Ok(Arc::new(PyTargetExecutorContext {\n py_export_ctx,\n py_exec_ctx,\n }) as Arc)\n }),\n setup_key: persistent_key,\n desired_setup_state: data_collection.spec,\n };\n build_outputs.push(build_output);\n }\n Ok((build_outputs, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n _context: Arc,\n ) -> Result> {\n // Collect all possible existing states that are not the desired state.\n let mut stale_existing_states = IndexSet::new();\n if !existing_states.always_exists() && desired_state.is_some() {\n stale_existing_states.insert(None);\n }\n for possible_state in existing_states.possible_versions() {\n if Some(possible_state) != desired_state.as_ref() {\n stale_existing_states.insert(Some(possible_state.clone()));\n }\n }\n\n Ok(Box::new(PyTargetResourceSetupStatus {\n stale_existing_states,\n desired_state,\n }))\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n Ok(key.clone())\n }\n\n fn check_state_compatibility(\n &self,\n _desired_state: &serde_json::Value,\n _existing_state: &serde_json::Value,\n ) -> Result {\n // The Python target connector doesn't support state update yet.\n Ok(SetupStateCompatibility::Compatible)\n }\n\n fn describe_resource(&self, key: 
&serde_json::Value) -> Result {\n Python::with_gil(|py| -> Result {\n let result = self\n .py_target_connector\n .call_method(py, \"describe_resource\", (pythonize(py, key)?,), None)\n .to_result_with_py_trace(py)?;\n let description = result.extract::(py)?;\n Ok(description)\n })\n }\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Filter the setup changes that are not NoChange, and flatten to\n // `list[tuple[key, list[stale_existing_states | None], desired_state | None]]` for Python.\n let mut setup_changes = Vec::new();\n for item in setup_status.into_iter() {\n let decoded_setup_status = (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n if ::change_type(decoded_setup_status)\n != setup::SetupChangeType::NoChange\n {\n setup_changes.push((\n item.key,\n &decoded_setup_status.stale_existing_states,\n &decoded_setup_status.desired_state,\n ));\n }\n }\n\n if setup_changes.is_empty() {\n return Ok(());\n }\n\n // Call the `apply_setup_changes_async()` method.\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let py_result = Python::with_gil(move |py| -> Result<_> {\n let result_coro = self\n .py_target_connector\n .call_method(\n py,\n \"apply_setup_changes_async\",\n (pythonize(py, &setup_changes)?,),\n None,\n )\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n\n Ok(())\n }\n\n async fn apply_mutation(\n &self,\n 
mutations: Vec<\n interface::ExportTargetMutationWithContext<'async_trait, dyn Any + Send + Sync>,\n >,\n ) -> Result<()> {\n if mutations.is_empty() {\n return Ok(());\n }\n\n let py_result = Python::with_gil(|py| -> Result<_> {\n // Create a `list[tuple[export_ctx, list[tuple[key, value | None]]]]` for Python, and collect `py_exec_ctx`.\n let mut py_args = Vec::with_capacity(mutations.len());\n let mut py_exec_ctx: Option<&Arc> = None;\n for mutation in mutations.into_iter() {\n // Downcast export_context to PyTargetExecutorContext.\n let export_context = (mutation.export_context as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n\n let mut flattened_mutations = Vec::with_capacity(\n mutation.mutation.upserts.len() + mutation.mutation.deletes.len(),\n );\n for upsert in mutation.mutation.upserts.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &upsert.key.into())?,\n py::field_values_to_py_object(py, upsert.value.fields.iter())?,\n ));\n }\n for delete in mutation.mutation.deletes.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &delete.key.into())?,\n py.None().into_bound(py),\n ));\n }\n py_args.push((\n &export_context.py_export_ctx,\n PyList::new(py, flattened_mutations)?.into_any(),\n ));\n py_exec_ctx = py_exec_ctx.or(Some(&export_context.py_exec_ctx));\n }\n let py_exec_ctx = py_exec_ctx.ok_or_else(invariance_violation)?;\n\n let result_coro = self\n .py_target_connector\n .call_method(py, \"mutate_async\", (py_args,), None)\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/builder/flow_builder.rs", "use crate::{prelude::*, py::Pythonized};\n\nuse pyo3::{exceptions::PyException, 
prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::{collections::btree_map, ops::Deref};\nuse tokio::task::LocalSet;\n\nuse super::analyzer::{\n AnalyzerContext, CollectorBuilder, DataScopeBuilder, OpScope, build_flow_instance_context,\n};\nuse crate::{\n base::{\n schema::{CollectorSchema, FieldSchema},\n spec::{FieldName, NamedSpec},\n },\n lib_context::LibContext,\n ops::interface::FlowInstanceContext,\n py::IntoPyResult,\n};\nuse crate::{lib_context::FlowContext, py};\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct OpScopeRef(Arc);\n\nimpl From> for OpScopeRef {\n fn from(scope: Arc) -> Self {\n Self(scope)\n }\n}\n\nimpl Deref for OpScopeRef {\n type Target = Arc;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl std::fmt::Display for OpScopeRef {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}\n\n#[pymethods]\nimpl OpScopeRef {\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn add_collector(&mut self, name: String) -> PyResult {\n let collector = DataCollector {\n name,\n scope: self.0.clone(),\n collector: Mutex::new(None),\n };\n Ok(collector)\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataType {\n schema: schema::EnrichedValueType,\n}\n\nimpl From for DataType {\n fn from(schema: schema::EnrichedValueType) -> Self {\n Self { schema }\n }\n}\n\n#[pymethods]\nimpl DataType {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.schema)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn schema(&self) -> Pythonized {\n Pythonized(self.schema.clone())\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataSlice {\n scope: Arc,\n value: Arc,\n data_type: DataType,\n}\n\n#[pymethods]\nimpl DataSlice {\n pub fn data_type(&self) -> DataType {\n self.data_type.clone()\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n 
pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn field(&self, field_name: &str) -> PyResult> {\n let field_schema = match &self.data_type.schema.typ {\n schema::ValueType::Struct(struct_type) => {\n match struct_type.fields.iter().find(|f| f.name == field_name) {\n Some(field) => field,\n None => return Ok(None),\n }\n }\n _ => return Err(PyException::new_err(\"expect struct type\")),\n };\n let value_mapping = match self.value.as_ref() {\n spec::ValueMapping::Field(spec::FieldMapping {\n scope,\n field_path: spec::FieldPath(field_path),\n }) => spec::ValueMapping::Field(spec::FieldMapping {\n scope: scope.clone(),\n field_path: spec::FieldPath(\n field_path\n .iter()\n .cloned()\n .chain([field_name.to_string()])\n .collect(),\n ),\n }),\n\n spec::ValueMapping::Struct(v) => v\n .fields\n .iter()\n .find(|f| f.name == field_name)\n .map(|f| f.spec.clone())\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?,\n\n spec::ValueMapping::Constant { .. 
} => {\n return Err(PyException::new_err(\n \"field access not supported for literal\",\n ));\n }\n };\n Ok(Some(DataSlice {\n scope: self.scope.clone(),\n value: Arc::new(value_mapping),\n data_type: field_schema.value_type.clone().into(),\n }))\n }\n}\n\nimpl DataSlice {\n fn extract_value_mapping(&self) -> spec::ValueMapping {\n match self.value.as_ref() {\n spec::ValueMapping::Field(v) => spec::ValueMapping::Field(spec::FieldMapping {\n field_path: v.field_path.clone(),\n scope: v.scope.clone().or_else(|| Some(self.scope.name.clone())),\n }),\n v => v.clone(),\n }\n }\n}\n\nimpl std::fmt::Display for DataSlice {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(\n f,\n \"DataSlice({}; {} {}) \",\n self.data_type.schema, self.scope, self.value\n )?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct DataCollector {\n name: String,\n scope: Arc,\n collector: Mutex>,\n}\n\n#[pymethods]\nimpl DataCollector {\n fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for DataCollector {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let collector = self.collector.lock().unwrap();\n write!(f, \"DataCollector \\\"{}\\\" ({}\", self.name, self.scope)?;\n if let Some(collector) = collector.as_ref() {\n write!(f, \": {}\", collector.schema)?;\n if collector.is_used {\n write!(f, \" (used)\")?;\n }\n }\n write!(f, \")\")?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct FlowBuilder {\n lib_context: Arc,\n flow_inst_context: Arc,\n\n root_op_scope: Arc,\n flow_instance_name: String,\n reactive_ops: Vec>,\n\n direct_input_fields: Vec,\n direct_output_value: Option,\n\n import_ops: Vec>,\n export_ops: Vec>,\n\n declarations: Vec,\n\n next_generated_op_id: usize,\n}\n\n#[pymethods]\nimpl FlowBuilder {\n #[new]\n pub fn new(name: &str) -> PyResult {\n let lib_context = get_lib_context().into_py_result()?;\n let root_op_scope = OpScope::new(\n 
spec::ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(DataScopeBuilder::new())),\n );\n let flow_inst_context = build_flow_instance_context(name, None);\n let result = Self {\n lib_context,\n flow_inst_context,\n root_op_scope,\n flow_instance_name: name.to_string(),\n\n reactive_ops: vec![],\n\n import_ops: vec![],\n export_ops: vec![],\n\n direct_input_fields: vec![],\n direct_output_value: None,\n\n declarations: vec![],\n\n next_generated_op_id: 0,\n };\n Ok(result)\n }\n\n pub fn root_scope(&self) -> OpScopeRef {\n OpScopeRef(self.root_op_scope.clone())\n }\n\n #[pyo3(signature = (kind, op_spec, target_scope, name, refresh_options=None, execution_options=None))]\n #[allow(clippy::too_many_arguments)]\n pub fn add_source(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n target_scope: Option,\n name: String,\n refresh_options: Option>,\n execution_options: Option>,\n ) -> PyResult {\n if let Some(target_scope) = target_scope {\n if *target_scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"source can only be added to the root scope\",\n ));\n }\n }\n let import_op = spec::NamedSpec {\n name,\n spec: spec::ImportOpSpec {\n source: spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n },\n refresh_options: refresh_options.map(|o| o.into_inner()).unwrap_or_default(),\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n },\n };\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(\n analyzer_ctx.analyze_import_op(&self.root_op_scope, import_op.clone()),\n )\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.import_ops.push(import_op);\n Ok(result)\n }\n\n pub fn constant(\n &self,\n value_type: py::Pythonized,\n value: Bound<'_, 
PyAny>,\n ) -> PyResult {\n let schema = value_type.into_inner();\n let value = py::value_from_py_object(&schema.typ, &value)?;\n let slice = DataSlice {\n scope: self.root_op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Constant(spec::ConstantMapping {\n schema: schema.clone(),\n value: serde_json::to_value(value).into_py_result()?,\n })),\n data_type: schema.into(),\n };\n Ok(slice)\n }\n\n pub fn add_direct_input(\n &mut self,\n name: String,\n value_type: py::Pythonized,\n ) -> PyResult {\n let value_type = value_type.into_inner();\n {\n let mut root_data_scope = self.root_op_scope.data.lock().unwrap();\n root_data_scope\n .add_field(name.clone(), &value_type)\n .into_py_result()?;\n }\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.direct_input_fields\n .push(FieldSchema { name, value_type });\n Ok(result)\n }\n\n pub fn set_direct_output(&mut self, data_slice: DataSlice) -> PyResult<()> {\n if data_slice.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"direct output must be value in the root scope\",\n ));\n }\n self.direct_output_value = Some(data_slice.extract_value_mapping());\n Ok(())\n }\n\n #[pyo3(signature = (data_slice, execution_options=None))]\n pub fn for_each(\n &mut self,\n data_slice: DataSlice,\n execution_options: Option>,\n ) -> PyResult {\n let parent_scope = &data_slice.scope;\n let field_path = match data_slice.value.as_ref() {\n spec::ValueMapping::Field(v) => &v.field_path,\n _ => return Err(PyException::new_err(\"expect field path\")),\n };\n let num_parent_layers = parent_scope.ancestors().count();\n let scope_name = format!(\n \"{}_{}\",\n field_path.last().map_or(\"\", |s| s.as_str()),\n num_parent_layers\n );\n let (_, child_op_scope) = parent_scope\n .new_foreach_op_scope(scope_name.clone(), field_path)\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name: format!(\".for_each.{}\", self.next_generated_op_id),\n spec: 
spec::ReactiveOpSpec::ForEach(spec::ForEachOpSpec {\n field_path: field_path.clone(),\n op_scope: spec::ReactiveOpScope {\n name: scope_name,\n ops: vec![],\n },\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n }),\n };\n self.next_generated_op_id += 1;\n self.get_mut_reactive_ops(parent_scope)\n .into_py_result()?\n .push(reactive_op);\n\n Ok(OpScopeRef(child_op_scope))\n }\n\n #[pyo3(signature = (kind, op_spec, args, target_scope, name))]\n pub fn transform(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n args: Vec<(DataSlice, Option)>,\n target_scope: Option,\n name: String,\n ) -> PyResult {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n let op_scope = Self::minimum_common_scope(\n args.iter().map(|(ds, _)| &ds.scope),\n target_scope.as_ref().map(|s| &s.0),\n )\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Transform(spec::TransformOpSpec {\n inputs: args\n .iter()\n .map(|(ds, arg_name)| spec::OpArgBinding {\n arg_name: spec::OpArgName(arg_name.clone()),\n value: ds.extract_value_mapping(),\n })\n .collect(),\n op: spec,\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(op_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(op_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let result = Self::last_field_to_data_slice(op_scope).into_py_result()?;\n Ok(result)\n }\n\n #[pyo3(signature = (collector, fields, auto_uuid_field=None))]\n pub fn collect(\n &mut self,\n py: Python<'_>,\n collector: &DataCollector,\n fields: Vec<(FieldName, DataSlice)>,\n auto_uuid_field: Option,\n ) -> PyResult<()> {\n let common_scope = Self::minimum_common_scope(fields.iter().map(|(_, ds)| 
&ds.scope), None)\n .into_py_result()?;\n let name = format!(\".collect.{}\", self.next_generated_op_id);\n self.next_generated_op_id += 1;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Collect(spec::CollectOpSpec {\n input: spec::StructMapping {\n fields: fields\n .iter()\n .map(|(name, ds)| NamedSpec {\n name: name.clone(),\n spec: ds.extract_value_mapping(),\n })\n .collect(),\n },\n scope_name: collector.scope.name.clone(),\n collector_name: collector.name.clone(),\n auto_uuid_field: auto_uuid_field.clone(),\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(common_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(common_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let collector_schema = CollectorSchema::from_fields(\n fields\n .into_iter()\n .map(|(name, ds)| FieldSchema {\n name,\n value_type: ds.data_type.schema,\n })\n .collect(),\n auto_uuid_field,\n );\n {\n let mut collector = collector.collector.lock().unwrap();\n if let Some(collector) = collector.as_mut() {\n collector.merge_schema(&collector_schema).into_py_result()?;\n } else {\n *collector = Some(CollectorBuilder::new(Arc::new(collector_schema)));\n }\n }\n\n Ok(())\n }\n\n #[pyo3(signature = (name, kind, op_spec, index_options, input, setup_by_user=false))]\n pub fn export(\n &mut self,\n name: String,\n kind: String,\n op_spec: py::Pythonized>,\n index_options: py::Pythonized,\n input: &DataCollector,\n setup_by_user: bool,\n ) -> PyResult<()> {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n\n if input.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"Export can only work on collectors belonging to the root scope.\",\n ));\n }\n self.export_ops.push(spec::NamedSpec {\n 
name,\n spec: spec::ExportOpSpec {\n collector_name: input.name.clone(),\n target: spec,\n index_options: index_options.into_inner(),\n setup_by_user,\n },\n });\n Ok(())\n }\n\n pub fn declare(&mut self, op_spec: py::Pythonized) -> PyResult<()> {\n self.declarations.push(op_spec.into_inner());\n Ok(())\n }\n\n pub fn scope_field(&self, scope: OpScopeRef, field_name: &str) -> PyResult> {\n let field_type = {\n let scope_builder = scope.0.data.lock().unwrap();\n let (_, field_schema) = scope_builder\n .data\n .find_field(field_name)\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?;\n schema::EnrichedValueType::from_alternative(&field_schema.value_type)\n .into_py_result()?\n };\n Ok(Some(DataSlice {\n scope: scope.0,\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![field_name.to_string()]),\n })),\n data_type: DataType { schema: field_type },\n }))\n }\n\n pub fn build_flow(&self, py: Python<'_>, py_event_loop: Py) -> PyResult {\n let spec = spec::FlowInstanceSpec {\n name: self.flow_instance_name.clone(),\n import_ops: self.import_ops.clone(),\n reactive_ops: self.reactive_ops.clone(),\n export_ops: self.export_ops.clone(),\n declarations: self.declarations.clone(),\n };\n let flow_instance_ctx = build_flow_instance_context(\n &self.flow_instance_name,\n Some(crate::py::PythonExecutionContext::new(py, py_event_loop)),\n );\n let flow_ctx = py\n .allow_threads(|| {\n get_runtime().block_on(async move {\n let analyzed_flow =\n super::AnalyzedFlow::from_flow_instance(spec, flow_instance_ctx).await?;\n let persistence_ctx = self.lib_context.require_persistence_ctx()?;\n let execution_ctx = {\n let flow_setup_ctx = persistence_ctx.setup_ctx.read().await;\n FlowContext::new(\n Arc::new(analyzed_flow),\n flow_setup_ctx\n .all_setup_states\n .flows\n .get(&self.flow_instance_name),\n )\n .await?\n };\n anyhow::Ok(execution_ctx)\n })\n })\n .into_py_result()?;\n let mut 
flow_ctxs = self.lib_context.flows.lock().unwrap();\n let flow_ctx = match flow_ctxs.entry(self.flow_instance_name.clone()) {\n btree_map::Entry::Occupied(_) => {\n return Err(PyException::new_err(format!(\n \"flow instance name already exists: {}\",\n self.flow_instance_name\n )));\n }\n btree_map::Entry::Vacant(entry) => {\n let flow_ctx = Arc::new(flow_ctx);\n entry.insert(flow_ctx.clone());\n flow_ctx\n }\n };\n Ok(py::Flow(flow_ctx))\n }\n\n pub fn build_transient_flow_async<'py>(\n &self,\n py: Python<'py>,\n py_event_loop: Py,\n ) -> PyResult> {\n if self.direct_input_fields.is_empty() {\n return Err(PyException::new_err(\"expect at least one direct input\"));\n }\n let direct_output_value = if let Some(direct_output_value) = &self.direct_output_value {\n direct_output_value\n } else {\n return Err(PyException::new_err(\"expect direct output\"));\n };\n let spec = spec::TransientFlowSpec {\n name: self.flow_instance_name.clone(),\n input_fields: self.direct_input_fields.clone(),\n reactive_ops: self.reactive_ops.clone(),\n output_value: direct_output_value.clone(),\n };\n let py_ctx = crate::py::PythonExecutionContext::new(py, py_event_loop);\n\n let analyzed_flow = get_runtime().spawn_blocking(|| {\n let local_set = LocalSet::new();\n local_set.block_on(\n get_runtime(),\n super::AnalyzedTransientFlow::from_transient_flow(spec, Some(py_ctx)),\n )\n });\n future_into_py(py, async move {\n Ok(py::TransientFlow(Arc::new(\n analyzed_flow.await.into_py_result()?.into_py_result()?,\n )))\n })\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for FlowBuilder {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Flow instance name: {}\\n\\n\", self.flow_instance_name)?;\n for op in self.import_ops.iter() {\n write!(\n f,\n \"Source op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n 
}\n for field in self.direct_input_fields.iter() {\n writeln!(f, \"Direct input {}: {}\", field.name, field.value_type)?;\n }\n if !self.direct_input_fields.is_empty() {\n writeln!(f)?;\n }\n for op in self.reactive_ops.iter() {\n write!(\n f,\n \"Reactive op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for op in self.export_ops.iter() {\n write!(\n f,\n \"Export op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n if let Some(output) = &self.direct_output_value {\n write!(f, \"Direct output: {output}\\n\\n\")?;\n }\n Ok(())\n }\n}\n\nimpl FlowBuilder {\n fn last_field_to_data_slice(op_scope: &Arc) -> Result {\n let data_scope = op_scope.data.lock().unwrap();\n let last_field = data_scope.last_field().unwrap();\n let result = DataSlice {\n scope: op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![last_field.name.clone()]),\n })),\n data_type: schema::EnrichedValueType::from_alternative(&last_field.value_type)?.into(),\n };\n Ok(result)\n }\n\n fn minimum_common_scope<'a>(\n scopes: impl Iterator>,\n target_scope: Option<&'a Arc>,\n ) -> Result<&'a Arc> {\n let mut scope_iter = scopes;\n let mut common_scope = scope_iter\n .next()\n .ok_or_else(|| PyException::new_err(\"expect at least one input\"))?;\n for scope in scope_iter {\n if scope.is_op_scope_descendant(common_scope) {\n common_scope = scope;\n } else if !common_scope.is_op_scope_descendant(scope) {\n api_bail!(\n \"expect all arguments share the common scope, got {} and {} exclusive to each other\",\n common_scope,\n scope\n );\n }\n }\n if let Some(target_scope) = target_scope {\n if !target_scope.is_op_scope_descendant(common_scope) {\n api_bail!(\n \"the field can only be attached to a scope or sub-scope of the input value. 
Target scope: {}, input scope: {}\",\n target_scope,\n common_scope\n );\n }\n common_scope = target_scope;\n }\n Ok(common_scope)\n }\n\n fn get_mut_reactive_ops<'a>(\n &'a mut self,\n op_scope: &OpScope,\n ) -> Result<&'a mut Vec>> {\n Self::get_mut_reactive_ops_internal(op_scope, &mut self.reactive_ops)\n }\n\n fn get_mut_reactive_ops_internal<'a>(\n op_scope: &OpScope,\n root_reactive_ops: &'a mut Vec>,\n ) -> Result<&'a mut Vec>> {\n let result = match &op_scope.parent {\n None => root_reactive_ops,\n Some((parent_op_scope, field_path)) => {\n let parent_reactive_ops =\n Self::get_mut_reactive_ops_internal(parent_op_scope, root_reactive_ops)?;\n // Reuse the last foreach if matched, otherwise create a new one.\n match parent_reactive_ops.last() {\n Some(spec::NamedSpec {\n spec: spec::ReactiveOpSpec::ForEach(foreach_spec),\n ..\n }) if &foreach_spec.field_path == field_path\n && foreach_spec.op_scope.name == op_scope.name => {}\n\n _ => {\n api_bail!(\"already out of op scope `{}`\", op_scope.name);\n }\n }\n match &mut parent_reactive_ops.last_mut().unwrap().spec {\n spec::ReactiveOpSpec::ForEach(foreach_spec) => &mut foreach_spec.op_scope.ops,\n _ => unreachable!(),\n }\n }\n };\n Ok(result)\n }\n}\n"], ["/cocoindex/src/execution/source_indexer.rs", "use crate::{\n prelude::*,\n service::error::{SharedError, SharedResult, SharedResultExt},\n};\n\nuse futures::future::Ready;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, hash_map};\nuse tokio::{sync::Semaphore, task::JoinSet};\n\nuse super::{\n db_tracking,\n evaluator::SourceRowEvaluationContext,\n row_indexer::{self, SkippedOr, SourceVersion},\n stats,\n};\n\nuse crate::ops::interface;\nstruct SourceRowIndexingState {\n source_version: SourceVersion,\n processing_sem: Arc,\n touched_generation: usize,\n}\n\nimpl Default for SourceRowIndexingState {\n fn default() -> Self {\n Self {\n source_version: SourceVersion::default(),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: 0,\n }\n 
}\n}\n\nstruct SourceIndexingState {\n rows: HashMap,\n scan_generation: usize,\n}\n\npub struct SourceIndexingContext {\n flow: Arc,\n source_idx: usize,\n pending_update: Mutex>>>>,\n update_sem: Semaphore,\n state: Mutex,\n setup_execution_ctx: Arc,\n}\n\npub const NO_ACK: Option Ready>> = None;\n\nimpl SourceIndexingContext {\n pub async fn load(\n flow: Arc,\n source_idx: usize,\n setup_execution_ctx: Arc,\n pool: &PgPool,\n ) -> Result {\n let plan = flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[source_idx];\n let mut list_state = db_tracking::ListTrackedSourceKeyMetadataState::new();\n let mut rows = HashMap::new();\n let scan_generation = 0;\n {\n let mut key_metadata_stream = list_state.list(\n setup_execution_ctx.import_ops[source_idx].source_id,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n while let Some(key_metadata) = key_metadata_stream.next().await {\n let key_metadata = key_metadata?;\n let source_key = value::Value::::from_json(\n key_metadata.source_key,\n &import_op.primary_key_type,\n )?\n .into_key()?;\n rows.insert(\n source_key,\n SourceRowIndexingState {\n source_version: SourceVersion::from_stored(\n key_metadata.processed_source_ordinal,\n &key_metadata.process_logic_fingerprint,\n plan.logic_fingerprint,\n ),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: scan_generation,\n },\n );\n }\n }\n Ok(Self {\n flow,\n source_idx,\n state: Mutex::new(SourceIndexingState {\n rows,\n scan_generation,\n }),\n pending_update: Mutex::new(None),\n update_sem: Semaphore::new(1),\n setup_execution_ctx,\n })\n }\n\n pub async fn process_source_key<\n AckFut: Future> + Send + 'static,\n AckFn: FnOnce() -> AckFut,\n >(\n self: Arc,\n key: value::KeyValue,\n source_data: Option,\n update_stats: Arc,\n _concur_permit: concur_control::CombinedConcurrencyControllerPermit,\n ack_fn: Option,\n pool: PgPool,\n ) {\n let process = async {\n let plan = self.flow.get_execution_plan().await?;\n let 
import_op = &plan.import_ops[self.source_idx];\n let schema = &self.flow.data_schema;\n let source_data = match source_data {\n Some(source_data) => source_data,\n None => import_op\n .executor\n .get_value(\n &key,\n &interface::SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: true,\n },\n )\n .await?\n .try_into()?,\n };\n\n let source_version = SourceVersion::from_current_data(&source_data);\n let processing_sem = {\n let mut state = self.state.lock().unwrap();\n let touched_generation = state.scan_generation;\n match state.rows.entry(key.clone()) {\n hash_map::Entry::Occupied(mut entry) => {\n if entry\n .get()\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n return anyhow::Ok(());\n }\n let sem = entry.get().processing_sem.clone();\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n entry.remove();\n } else {\n entry.get_mut().source_version = source_version.clone();\n }\n sem\n }\n hash_map::Entry::Vacant(entry) => {\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n update_stats.num_no_change.inc(1);\n return anyhow::Ok(());\n }\n let new_entry = SourceRowIndexingState {\n source_version: source_version.clone(),\n touched_generation,\n ..Default::default()\n };\n let sem = new_entry.processing_sem.clone();\n entry.insert(new_entry);\n sem\n }\n }\n };\n\n let _processing_permit = processing_sem.acquire().await?;\n let result = row_indexer::update_source_row(\n &SourceRowEvaluationContext {\n plan: &plan,\n import_op,\n schema,\n key: &key,\n import_op_idx: self.source_idx,\n },\n &self.setup_execution_ctx,\n source_data.value,\n &source_version,\n &pool,\n &update_stats,\n )\n .await?;\n let target_source_version = if let SkippedOr::Skipped(existing_source_version) = result\n {\n Some(existing_source_version)\n } else if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n Some(source_version)\n } else {\n None\n };\n if let 
Some(target_source_version) = target_source_version {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let entry = state.rows.entry(key.clone());\n match entry {\n hash_map::Entry::Occupied(mut entry) => {\n if !entry\n .get()\n .source_version\n .should_skip(&target_source_version, None)\n {\n if target_source_version.kind\n == row_indexer::SourceVersionKind::NonExistence\n {\n entry.remove();\n } else {\n let mut_entry = entry.get_mut();\n mut_entry.source_version = target_source_version;\n mut_entry.touched_generation = scan_generation;\n }\n }\n }\n hash_map::Entry::Vacant(entry) => {\n if target_source_version.kind\n != row_indexer::SourceVersionKind::NonExistence\n {\n entry.insert(SourceRowIndexingState {\n source_version: target_source_version,\n touched_generation: scan_generation,\n ..Default::default()\n });\n }\n }\n }\n }\n anyhow::Ok(())\n };\n let process_and_ack = async {\n process.await?;\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n anyhow::Ok(())\n };\n if let Err(e) = process_and_ack.await {\n update_stats.num_errors.inc(1);\n error!(\n \"{:?}\",\n e.context(format!(\n \"Error in processing row from source `{source}` with key: {key}\",\n source = self.flow.flow_instance.import_ops[self.source_idx].name\n ))\n );\n }\n }\n\n pub async fn update(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let pending_update_fut = {\n let mut pending_update = self.pending_update.lock().unwrap();\n if let Some(pending_update_fut) = &*pending_update {\n pending_update_fut.clone()\n } else {\n let slf = self.clone();\n let pool = pool.clone();\n let update_stats = update_stats.clone();\n let task = tokio::spawn(async move {\n {\n let _permit = slf.update_sem.acquire().await?;\n {\n let mut pending_update = slf.pending_update.lock().unwrap();\n *pending_update = None;\n }\n slf.update_once(&pool, &update_stats).await?;\n }\n anyhow::Ok(())\n });\n let pending_update_fut = async 
move {\n task.await\n .map_err(SharedError::from)?\n .map_err(SharedError::new)\n }\n .boxed()\n .shared();\n *pending_update = Some(pending_update_fut.clone());\n pending_update_fut\n }\n };\n pending_update_fut.await.std_result()?;\n Ok(())\n }\n\n async fn update_once(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let mut rows_stream = import_op\n .executor\n .list(&interface::SourceExecutorListOptions {\n include_ordinal: true,\n });\n let mut join_set = JoinSet::new();\n let scan_generation = {\n let mut state = self.state.lock().unwrap();\n state.scan_generation += 1;\n state.scan_generation\n };\n while let Some(row) = rows_stream.next().await {\n for row in row? {\n let source_version = SourceVersion::from_current_with_ordinal(\n row.ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is not available\"))?,\n );\n {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let row_state = state.rows.entry(row.key.clone()).or_default();\n row_state.touched_generation = scan_generation;\n if row_state\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n continue;\n }\n }\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n join_set.spawn(self.clone().process_source_key(\n row.key,\n None,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n let deleted_key_versions = {\n let mut deleted_key_versions = Vec::new();\n let state = self.state.lock().unwrap();\n for (key, row_state) in state.rows.iter() {\n if row_state.touched_generation < scan_generation {\n deleted_key_versions.push((key.clone(), 
row_state.source_version.ordinal));\n }\n }\n deleted_key_versions\n };\n for (key, source_ordinal) in deleted_key_versions {\n // If the source ordinal is unavailable, call without source ordinal so that another polling will be triggered to avoid out-of-order.\n let source_data = source_ordinal\n .is_available()\n .then(|| interface::SourceData {\n value: interface::SourceValue::NonExistence,\n ordinal: source_ordinal,\n });\n let concur_permit = import_op.concurrency_controller.acquire(Some(|| 0)).await?;\n join_set.spawn(self.clone().process_source_key(\n key,\n source_data,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/base/value.rs", "use super::schema::*;\nuse crate::base::duration::parse_duration;\nuse crate::prelude::invariance_violation;\nuse crate::{api_bail, api_error};\nuse anyhow::Result;\nuse base64::prelude::*;\nuse bytes::Bytes;\nuse chrono::Offset;\nuse log::warn;\nuse serde::{\n Deserialize, Serialize,\n de::{SeqAccess, Visitor},\n ser::{SerializeMap, SerializeSeq, SerializeTuple},\n};\nuse std::{collections::BTreeMap, ops::Deref, sync::Arc};\n\npub trait EstimatedByteSize: Sized {\n fn estimated_detached_byte_size(&self) -> usize;\n\n fn estimated_byte_size(&self) -> usize {\n self.estimated_detached_byte_size() + std::mem::size_of::()\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct RangeValue {\n pub start: usize,\n pub end: usize,\n}\n\nimpl RangeValue {\n pub fn new(start: usize, end: usize) -> Self {\n RangeValue { start, end }\n }\n\n pub fn len(&self) -> usize {\n self.end - self.start\n }\n\n pub fn extract_str<'s>(&self, s: &'s (impl AsRef + ?Sized)) -> &'s str {\n let s = s.as_ref();\n &s[self.start..self.end]\n }\n}\n\nimpl Serialize for RangeValue {\n fn serialize(&self, serializer: S) 
-> Result {\n let mut tuple = serializer.serialize_tuple(2)?;\n tuple.serialize_element(&self.start)?;\n tuple.serialize_element(&self.end)?;\n tuple.end()\n }\n}\n\nimpl<'de> Deserialize<'de> for RangeValue {\n fn deserialize>(deserializer: D) -> Result {\n struct RangeVisitor;\n\n impl<'de> Visitor<'de> for RangeVisitor {\n type Value = RangeValue;\n\n fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n formatter.write_str(\"a tuple of two u64\")\n }\n\n fn visit_seq(self, mut seq: V) -> Result\n where\n V: SeqAccess<'de>,\n {\n let start = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing begin\"))?;\n let end = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing end\"))?;\n Ok(RangeValue { start, end })\n }\n }\n deserializer.deserialize_tuple(2, RangeVisitor)\n }\n}\n\n/// Value of key.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]\npub enum KeyValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Struct(Vec),\n}\n\nimpl From for KeyValue {\n fn from(value: Bytes) -> Self {\n KeyValue::Bytes(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Arc) -> Self {\n KeyValue::Str(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: String) -> Self {\n KeyValue::Str(Arc::from(value))\n }\n}\n\nimpl From for KeyValue {\n fn from(value: bool) -> Self {\n KeyValue::Bool(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: i64) -> Self {\n KeyValue::Int64(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: RangeValue) -> Self {\n KeyValue::Range(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: uuid::Uuid) -> Self {\n KeyValue::Uuid(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: chrono::NaiveDate) -> Self {\n 
KeyValue::Date(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Struct(value)\n }\n}\n\nimpl serde::Serialize for KeyValue {\n fn serialize(&self, serializer: S) -> Result {\n Value::from(self.clone()).serialize(serializer)\n }\n}\n\nimpl std::fmt::Display for KeyValue {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n KeyValue::Bytes(v) => write!(f, \"{}\", BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => write!(f, \"\\\"{}\\\"\", v.escape_default()),\n KeyValue::Bool(v) => write!(f, \"{v}\"),\n KeyValue::Int64(v) => write!(f, \"{v}\"),\n KeyValue::Range(v) => write!(f, \"[{}, {})\", v.start, v.end),\n KeyValue::Uuid(v) => write!(f, \"{v}\"),\n KeyValue::Date(v) => write!(f, \"{v}\"),\n KeyValue::Struct(v) => {\n write!(\n f,\n \"[{}]\",\n v.iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\", \")\n )\n }\n }\n }\n}\n\nimpl KeyValue {\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n let value = if fields_schema.len() == 1 {\n Value::from_json(value, &fields_schema[0].value_type.typ)?\n } else {\n let field_values: FieldValues = FieldValues::from_json(value, fields_schema)?;\n Value::Struct(field_values)\n };\n value.as_key()\n }\n\n pub fn from_values<'a>(values: impl ExactSizeIterator) -> Result {\n let key = if values.len() == 1 {\n let mut values = values;\n values.next().ok_or_else(invariance_violation)?.as_key()?\n } else {\n KeyValue::Struct(values.map(|v| v.as_key()).collect::>>()?)\n };\n Ok(key)\n }\n\n pub fn fields_iter(&self, num_fields: usize) -> Result> {\n let slice = if num_fields == 1 {\n std::slice::from_ref(self)\n } else {\n match self {\n KeyValue::Struct(v) => v,\n _ => api_bail!(\"Invalid key value type\"),\n }\n };\n Ok(slice.iter())\n }\n\n fn parts_from_str(\n values_iter: &mut impl Iterator,\n schema: &ValueType,\n ) -> Result {\n let result = match schema {\n ValueType::Basic(basic_type) => {\n let v = 
values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n match basic_type {\n BasicValueType::Bytes => {\n KeyValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n BasicValueType::Str => KeyValue::Str(Arc::from(v)),\n BasicValueType::Bool => KeyValue::Bool(v.parse()?),\n BasicValueType::Int64 => KeyValue::Int64(v.parse()?),\n BasicValueType::Range => {\n let v2 = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n KeyValue::Range(RangeValue {\n start: v.parse()?,\n end: v2.parse()?,\n })\n }\n BasicValueType::Uuid => KeyValue::Uuid(v.parse()?),\n BasicValueType::Date => KeyValue::Date(v.parse()?),\n schema => api_bail!(\"Invalid key type {schema}\"),\n }\n }\n ValueType::Struct(s) => KeyValue::Struct(\n s.fields\n .iter()\n .map(|f| KeyValue::parts_from_str(values_iter, &f.value_type.typ))\n .collect::>>()?,\n ),\n _ => api_bail!(\"Invalid key type {schema}\"),\n };\n Ok(result)\n }\n\n fn parts_to_strs(&self, output: &mut Vec) {\n match self {\n KeyValue::Bytes(v) => output.push(BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => output.push(v.to_string()),\n KeyValue::Bool(v) => output.push(v.to_string()),\n KeyValue::Int64(v) => output.push(v.to_string()),\n KeyValue::Range(v) => {\n output.push(v.start.to_string());\n output.push(v.end.to_string());\n }\n KeyValue::Uuid(v) => output.push(v.to_string()),\n KeyValue::Date(v) => output.push(v.to_string()),\n KeyValue::Struct(v) => {\n for part in v {\n part.parts_to_strs(output);\n }\n }\n }\n }\n\n pub fn from_strs(value: impl IntoIterator, schema: &ValueType) -> Result {\n let mut values_iter = value.into_iter();\n let result = Self::parts_from_str(&mut values_iter, schema)?;\n if values_iter.next().is_some() {\n api_bail!(\"Key parts more than expected\");\n }\n Ok(result)\n }\n\n pub fn to_strs(&self) -> Vec {\n let mut output = Vec::with_capacity(self.num_parts());\n self.parts_to_strs(&mut output);\n output\n }\n\n pub fn 
kind_str(&self) -> &'static str {\n match self {\n KeyValue::Bytes(_) => \"bytes\",\n KeyValue::Str(_) => \"str\",\n KeyValue::Bool(_) => \"bool\",\n KeyValue::Int64(_) => \"int64\",\n KeyValue::Range { .. } => \"range\",\n KeyValue::Uuid(_) => \"uuid\",\n KeyValue::Date(_) => \"date\",\n KeyValue::Struct(_) => \"struct\",\n }\n }\n\n pub fn bytes_value(&self) -> Result<&Bytes> {\n match self {\n KeyValue::Bytes(v) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn str_value(&self) -> Result<&Arc> {\n match self {\n KeyValue::Str(v) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn bool_value(&self) -> Result {\n match self {\n KeyValue::Bool(v) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn int64_value(&self) -> Result {\n match self {\n KeyValue::Int64(v) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn range_value(&self) -> Result {\n match self {\n KeyValue::Range(v) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn uuid_value(&self) -> Result {\n match self {\n KeyValue::Uuid(v) => Ok(*v),\n _ => anyhow::bail!(\"expected uuid value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn date_value(&self) -> Result {\n match self {\n KeyValue::Date(v) => Ok(*v),\n _ => anyhow::bail!(\"expected date value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn struct_value(&self) -> Result<&Vec> {\n match self {\n KeyValue::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn num_parts(&self) -> usize {\n match self {\n KeyValue::Range(_) => 2,\n KeyValue::Struct(v) => v.iter().map(|v| v.num_parts()).sum(),\n _ => 1,\n }\n }\n\n fn estimated_detached_byte_size(&self) -> usize {\n match self {\n KeyValue::Bytes(v) => v.len(),\n 
KeyValue::Str(v) => v.len(),\n KeyValue::Struct(v) => {\n v.iter()\n .map(KeyValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n KeyValue::Bool(_)\n | KeyValue::Int64(_)\n | KeyValue::Range(_)\n | KeyValue::Uuid(_)\n | KeyValue::Date(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub enum BasicValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Float32(f32),\n Float64(f64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Time(chrono::NaiveTime),\n LocalDateTime(chrono::NaiveDateTime),\n OffsetDateTime(chrono::DateTime),\n TimeDelta(chrono::Duration),\n Json(Arc),\n Vector(Arc<[BasicValue]>),\n UnionVariant {\n tag_id: usize,\n value: Box,\n },\n}\n\nimpl From for BasicValue {\n fn from(value: Bytes) -> Self {\n BasicValue::Bytes(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Arc) -> Self {\n BasicValue::Str(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: String) -> Self {\n BasicValue::Str(Arc::from(value))\n }\n}\n\nimpl From for BasicValue {\n fn from(value: bool) -> Self {\n BasicValue::Bool(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: i64) -> Self {\n BasicValue::Int64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f32) -> Self {\n BasicValue::Float32(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f64) -> Self {\n BasicValue::Float64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: uuid::Uuid) -> Self {\n BasicValue::Uuid(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDate) -> Self {\n BasicValue::Date(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveTime) -> Self {\n BasicValue::Time(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDateTime) -> Self {\n BasicValue::LocalDateTime(value)\n 
}\n}\n\nimpl From> for BasicValue {\n fn from(value: chrono::DateTime) -> Self {\n BasicValue::OffsetDateTime(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::Duration) -> Self {\n BasicValue::TimeDelta(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: serde_json::Value) -> Self {\n BasicValue::Json(Arc::from(value))\n }\n}\n\nimpl> From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Vector(Arc::from(\n value.into_iter().map(|v| v.into()).collect::>(),\n ))\n }\n}\n\nimpl BasicValue {\n pub fn into_key(self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v),\n BasicValue::Str(v) => KeyValue::Str(v),\n BasicValue::Bool(v) => KeyValue::Bool(v),\n BasicValue::Int64(v) => KeyValue::Int64(v),\n BasicValue::Range(v) => KeyValue::Range(v),\n BasicValue::Uuid(v) => KeyValue::Uuid(v),\n BasicValue::Date(v) => KeyValue::Date(v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. } => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v.clone()),\n BasicValue::Str(v) => KeyValue::Str(v.clone()),\n BasicValue::Bool(v) => KeyValue::Bool(*v),\n BasicValue::Int64(v) => KeyValue::Int64(*v),\n BasicValue::Range(v) => KeyValue::Range(*v),\n BasicValue::Uuid(v) => KeyValue::Uuid(*v),\n BasicValue::Date(v) => KeyValue::Date(*v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. 
} => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match &self {\n BasicValue::Bytes(_) => \"bytes\",\n BasicValue::Str(_) => \"str\",\n BasicValue::Bool(_) => \"bool\",\n BasicValue::Int64(_) => \"int64\",\n BasicValue::Float32(_) => \"float32\",\n BasicValue::Float64(_) => \"float64\",\n BasicValue::Range(_) => \"range\",\n BasicValue::Uuid(_) => \"uuid\",\n BasicValue::Date(_) => \"date\",\n BasicValue::Time(_) => \"time\",\n BasicValue::LocalDateTime(_) => \"local_datetime\",\n BasicValue::OffsetDateTime(_) => \"offset_datetime\",\n BasicValue::TimeDelta(_) => \"timedelta\",\n BasicValue::Json(_) => \"json\",\n BasicValue::Vector(_) => \"vector\",\n BasicValue::UnionVariant { .. } => \"union\",\n }\n }\n\n /// Returns the estimated byte size of the value, for detached data (i.e. allocated on heap).\n fn estimated_detached_byte_size(&self) -> usize {\n fn json_estimated_detached_byte_size(val: &serde_json::Value) -> usize {\n match val {\n serde_json::Value::String(s) => s.len(),\n serde_json::Value::Array(arr) => {\n arr.iter()\n .map(json_estimated_detached_byte_size)\n .sum::()\n + arr.len() * std::mem::size_of::()\n }\n serde_json::Value::Object(map) => map\n .iter()\n .map(|(k, v)| {\n std::mem::size_of::()\n + k.len()\n + json_estimated_detached_byte_size(v)\n })\n .sum(),\n serde_json::Value::Null\n | serde_json::Value::Bool(_)\n | serde_json::Value::Number(_) => 0,\n }\n }\n match self {\n BasicValue::Bytes(v) => v.len(),\n BasicValue::Str(v) => v.len(),\n BasicValue::Json(v) => json_estimated_detached_byte_size(v),\n BasicValue::Vector(v) => {\n v.iter()\n .map(BasicValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n BasicValue::UnionVariant { value, .. 
} => {\n value.estimated_detached_byte_size() + std::mem::size_of::()\n }\n BasicValue::Bool(_)\n | BasicValue::Int64(_)\n | BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Range(_)\n | BasicValue::Uuid(_)\n | BasicValue::Date(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Deserialize)]\npub enum Value {\n #[default]\n Null,\n Basic(BasicValue),\n Struct(FieldValues),\n UTable(Vec),\n KTable(BTreeMap),\n LTable(Vec),\n}\n\nimpl> From for Value {\n fn from(value: T) -> Self {\n Value::Basic(value.into())\n }\n}\n\nimpl From for Value {\n fn from(value: KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v)),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v)),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.into_iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From<&KeyValue> for Value {\n fn from(value: &KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v.clone())),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v.clone())),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(*v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(*v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(*v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(*v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(*v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From for Value {\n fn from(value: FieldValues) -> Self {\n 
Value::Struct(value)\n }\n}\n\nimpl> From> for Value {\n fn from(value: Option) -> Self {\n match value {\n Some(v) => v.into(),\n None => Value::Null,\n }\n }\n}\n\nimpl Value {\n pub fn from_alternative(value: Value) -> Self\n where\n AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .into_iter()\n .map(|v| Value::::from_alternative(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.into_iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.into_iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.into_iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn from_alternative_ref(value: &Value) -> Self\n where\n for<'a> &'a AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v.clone()),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .iter()\n .map(|v| Value::::from_alternative_ref(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn is_null(&self) -> bool {\n matches!(self, Value::Null)\n }\n\n pub fn into_key(self) -> Result {\n let result = match self {\n Value::Basic(v) => v.into_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .into_iter()\n .map(|v| v.into_key())\n .collect::>>()?,\n ),\n Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n Value::Basic(v) => v.as_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .iter()\n .map(|v| v.as_key())\n .collect::>>()?,\n ),\n 
Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match self {\n Value::Null => \"null\",\n Value::Basic(v) => v.kind(),\n Value::Struct(_) => \"Struct\",\n Value::UTable(_) => \"UTable\",\n Value::KTable(_) => \"KTable\",\n Value::LTable(_) => \"LTable\",\n }\n }\n\n pub fn optional(&self) -> Option<&Self> {\n match self {\n Value::Null => None,\n _ => Some(self),\n }\n }\n\n pub fn as_bytes(&self) -> Result<&Bytes> {\n match self {\n Value::Basic(BasicValue::Bytes(v)) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_str(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Str(v)) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_bool(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Bool(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_int64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Int64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float32(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float32(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float32 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_range(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Range(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_json(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Json(v)) => Ok(v),\n _ => anyhow::bail!(\"expected json value, but got {}\", self.kind()),\n }\n }\n\n 
pub fn as_vector(&self) -> Result<&Arc<[BasicValue]>> {\n match self {\n Value::Basic(BasicValue::Vector(v)) => Ok(v),\n _ => anyhow::bail!(\"expected vector value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_struct(&self) -> Result<&FieldValues> {\n match self {\n Value::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind()),\n }\n }\n}\n\nimpl Value {\n pub fn estimated_byte_size(&self) -> usize {\n std::mem::size_of::()\n + match self {\n Value::Null => 0,\n Value::Basic(v) => v.estimated_detached_byte_size(),\n Value::Struct(v) => v.estimated_detached_byte_size(),\n Value::UTable(v) | Value::LTable(v) => {\n v.iter()\n .map(|v| v.estimated_detached_byte_size())\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n Value::KTable(v) => {\n v.iter()\n .map(|(k, v)| {\n k.estimated_detached_byte_size() + v.estimated_detached_byte_size()\n })\n .sum::()\n + v.len() * std::mem::size_of::<(String, ScopeValue)>()\n }\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub struct FieldValues {\n pub fields: Vec>,\n}\n\nimpl EstimatedByteSize for FieldValues {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(Value::::estimated_byte_size)\n .sum::()\n + self.fields.len() * std::mem::size_of::>()\n }\n}\n\nimpl serde::Serialize for FieldValues {\n fn serialize(&self, serializer: S) -> Result {\n self.fields.serialize(serializer)\n }\n}\n\nimpl FieldValues\nwhere\n FieldValues: Into,\n{\n pub fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize(num_fields, Value::::Null);\n Self { fields }\n }\n\n fn from_json_values<'a>(\n fields: impl Iterator,\n ) -> Result {\n Ok(Self {\n fields: fields\n .map(|(s, v)| {\n let value = Value::::from_json(v, &s.value_type.typ)?;\n if value.is_null() && !s.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", s.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n fn 
from_json_object<'a>(\n values: serde_json::Map,\n fields_schema: impl Iterator,\n ) -> Result {\n let mut values = values;\n Ok(Self {\n fields: fields_schema\n .map(|field| {\n let value = match values.get_mut(&field.name) {\n Some(v) => {\n Value::::from_json(std::mem::take(v), &field.value_type.typ)?\n }\n None => Value::::default(),\n };\n if value.is_null() && !field.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", field.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n match value {\n serde_json::Value::Array(v) => {\n if v.len() != fields_schema.len() {\n api_bail!(\"unmatched value length\");\n }\n Self::from_json_values(fields_schema.iter().zip(v))\n }\n serde_json::Value::Object(v) => Self::from_json_object(v, fields_schema.iter()),\n _ => api_bail!(\"invalid value type\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct ScopeValue(pub FieldValues);\n\nimpl EstimatedByteSize for ScopeValue {\n fn estimated_detached_byte_size(&self) -> usize {\n self.0.estimated_detached_byte_size()\n }\n}\n\nimpl Deref for ScopeValue {\n type Target = FieldValues;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl From for ScopeValue {\n fn from(value: FieldValues) -> Self {\n Self(value)\n }\n}\n\nimpl serde::Serialize for BasicValue {\n fn serialize(&self, serializer: S) -> Result {\n match self {\n BasicValue::Bytes(v) => serializer.serialize_str(&BASE64_STANDARD.encode(v)),\n BasicValue::Str(v) => serializer.serialize_str(v),\n BasicValue::Bool(v) => serializer.serialize_bool(*v),\n BasicValue::Int64(v) => serializer.serialize_i64(*v),\n BasicValue::Float32(v) => serializer.serialize_f32(*v),\n BasicValue::Float64(v) => serializer.serialize_f64(*v),\n BasicValue::Range(v) => v.serialize(serializer),\n BasicValue::Uuid(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Date(v) => 
serializer.serialize_str(&v.to_string()),\n BasicValue::Time(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::LocalDateTime(v) => {\n serializer.serialize_str(&v.format(\"%Y-%m-%dT%H:%M:%S%.6f\").to_string())\n }\n BasicValue::OffsetDateTime(v) => {\n serializer.serialize_str(&v.to_rfc3339_opts(chrono::SecondsFormat::AutoSi, true))\n }\n BasicValue::TimeDelta(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Json(v) => v.serialize(serializer),\n BasicValue::Vector(v) => v.serialize(serializer),\n BasicValue::UnionVariant { tag_id, value } => {\n let mut s = serializer.serialize_tuple(2)?;\n s.serialize_element(tag_id)?;\n s.serialize_element(value)?;\n s.end()\n }\n }\n }\n}\n\nimpl BasicValue {\n pub fn from_json(value: serde_json::Value, schema: &BasicValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::String(v), BasicValueType::Bytes) => {\n BasicValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n (serde_json::Value::String(v), BasicValueType::Str) => BasicValue::Str(Arc::from(v)),\n (serde_json::Value::Bool(v), BasicValueType::Bool) => BasicValue::Bool(v),\n (serde_json::Value::Number(v), BasicValueType::Int64) => BasicValue::Int64(\n v.as_i64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid int64 value {v}\"))?,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float32) => BasicValue::Float32(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp32 value {v}\"))?\n as f32,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float64) => BasicValue::Float64(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp64 value {v}\"))?,\n ),\n (v, BasicValueType::Range) => BasicValue::Range(serde_json::from_value(v)?),\n (serde_json::Value::String(v), BasicValueType::Uuid) => BasicValue::Uuid(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Date) => BasicValue::Date(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Time) => BasicValue::Time(v.parse()?),\n 
(serde_json::Value::String(v), BasicValueType::LocalDateTime) => {\n BasicValue::LocalDateTime(v.parse()?)\n }\n (serde_json::Value::String(v), BasicValueType::OffsetDateTime) => {\n match chrono::DateTime::parse_from_rfc3339(&v) {\n Ok(dt) => BasicValue::OffsetDateTime(dt),\n Err(e) => {\n if let Ok(dt) = v.parse::() {\n warn!(\"Datetime without timezone offset, assuming UTC\");\n BasicValue::OffsetDateTime(chrono::DateTime::from_naive_utc_and_offset(\n dt,\n chrono::Utc.fix(),\n ))\n } else {\n Err(e)?\n }\n }\n }\n }\n (serde_json::Value::String(v), BasicValueType::TimeDelta) => {\n BasicValue::TimeDelta(parse_duration(&v)?)\n }\n (v, BasicValueType::Json) => BasicValue::Json(Arc::from(v)),\n (\n serde_json::Value::Array(v),\n BasicValueType::Vector(VectorTypeSchema { element_type, .. }),\n ) => {\n let vec = v\n .into_iter()\n .map(|v| BasicValue::from_json(v, element_type))\n .collect::>>()?;\n BasicValue::Vector(Arc::from(vec))\n }\n (v, BasicValueType::Union(typ)) => {\n let arr = match v {\n serde_json::Value::Array(arr) => arr,\n _ => anyhow::bail!(\"Invalid JSON value for union, expect array\"),\n };\n\n if arr.len() != 2 {\n anyhow::bail!(\n \"Invalid union tuple: expect 2 values, received {}\",\n arr.len()\n );\n }\n\n let mut obj_iter = arr.into_iter();\n\n // Take first element\n let tag_id = obj_iter\n .next()\n .and_then(|value| value.as_u64().map(|num_u64| num_u64 as usize))\n .unwrap();\n\n // Take second element\n let value = obj_iter.next().unwrap();\n\n let cur_type = typ\n .types\n .get(tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"No type in `tag_id` \\\"{tag_id}\\\" found\"))?;\n\n BasicValue::UnionVariant {\n tag_id,\n value: Box::new(BasicValue::from_json(value, cur_type)?),\n }\n }\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\nstruct TableEntry<'a>(&'a KeyValue, &'a ScopeValue);\n\nimpl serde::Serialize for Value {\n fn serialize(&self, serializer: S) 
-> Result {\n match self {\n Value::Null => serializer.serialize_none(),\n Value::Basic(v) => v.serialize(serializer),\n Value::Struct(v) => v.serialize(serializer),\n Value::UTable(v) => v.serialize(serializer),\n Value::KTable(m) => {\n let mut seq = serializer.serialize_seq(Some(m.len()))?;\n for (k, v) in m.iter() {\n seq.serialize_element(&TableEntry(k, v))?;\n }\n seq.end()\n }\n Value::LTable(v) => v.serialize(serializer),\n }\n }\n}\n\nimpl serde::Serialize for TableEntry<'_> {\n fn serialize(&self, serializer: S) -> Result {\n let &TableEntry(key, value) = self;\n let mut seq = serializer.serialize_seq(Some(value.0.fields.len() + 1))?;\n seq.serialize_element(key)?;\n for item in value.0.fields.iter() {\n seq.serialize_element(item)?;\n }\n seq.end()\n }\n}\n\nimpl Value\nwhere\n FieldValues: Into,\n{\n pub fn from_json(value: serde_json::Value, schema: &ValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::Null, _) => Value::::Null,\n (v, ValueType::Basic(t)) => Value::::Basic(BasicValue::from_json(v, t)?),\n (v, ValueType::Struct(s)) => {\n Value::::Struct(FieldValues::::from_json(v, &s.fields)?)\n }\n (serde_json::Value::Array(v), ValueType::Table(s)) => match s.kind {\n TableKind::UTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n TableKind::KTable => {\n let rows = v\n .into_iter()\n .map(|v| {\n let mut fields_iter = s.row.fields.iter();\n let key_field = fields_iter\n .next()\n .ok_or_else(|| api_error!(\"Empty struct field values\"))?;\n\n match v {\n serde_json::Value::Array(v) => {\n let mut field_vals_iter = v.into_iter();\n let key = Self::from_json(\n field_vals_iter.next().ok_or_else(|| {\n api_error!(\"Empty struct field values\")\n })?,\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_values(\n fields_iter.zip(field_vals_iter),\n )?;\n Ok((key, values.into()))\n }\n 
serde_json::Value::Object(mut v) => {\n let key = Self::from_json(\n std::mem::take(v.get_mut(&key_field.name).ok_or_else(\n || {\n api_error!(\n \"key field `{}` doesn't exist in value\",\n key_field.name\n )\n },\n )?),\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_object(v, fields_iter)?;\n Ok((key, values.into()))\n }\n _ => api_bail!(\"Table value must be a JSON array or object\"),\n }\n })\n .collect::>>()?;\n Value::KTable(rows)\n }\n TableKind::LTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n },\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct TypedValue<'a> {\n pub t: &'a ValueType,\n pub v: &'a Value,\n}\n\nimpl Serialize for TypedValue<'_> {\n fn serialize(&self, serializer: S) -> Result {\n match (self.t, self.v) {\n (_, Value::Null) => serializer.serialize_none(),\n (ValueType::Basic(t), v) => match t {\n BasicValueType::Union(_) => match v {\n Value::Basic(BasicValue::UnionVariant { value, .. 
}) => {\n value.serialize(serializer)\n }\n _ => Err(serde::ser::Error::custom(\n \"Unmatched union type and value for `TypedValue`\",\n )),\n },\n _ => v.serialize(serializer),\n },\n (ValueType::Struct(s), Value::Struct(field_values)) => TypedFieldsValue {\n schema: &s.fields,\n values_iter: field_values.fields.iter(),\n }\n .serialize(serializer),\n (ValueType::Table(c), Value::UTable(rows) | Value::LTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for row in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: row.fields.iter(),\n })?;\n }\n seq.end()\n }\n (ValueType::Table(c), Value::KTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for (k, v) in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: std::iter::once(&Value::from(k.clone()))\n .chain(v.fields.iter()),\n })?;\n }\n seq.end()\n }\n _ => Err(serde::ser::Error::custom(format!(\n \"Incompatible value type: {:?} {:?}\",\n self.t, self.v\n ))),\n }\n }\n}\n\npub struct TypedFieldsValue<'a, I: Iterator + Clone> {\n pub schema: &'a [FieldSchema],\n pub values_iter: I,\n}\n\nimpl<'a, I: Iterator + Clone> Serialize for TypedFieldsValue<'a, I> {\n fn serialize(&self, serializer: S) -> Result {\n let mut map = serializer.serialize_map(Some(self.schema.len()))?;\n let values_iter = self.values_iter.clone();\n for (field, value) in self.schema.iter().zip(values_iter) {\n map.serialize_entry(\n &field.name,\n &TypedValue {\n t: &field.value_type.typ,\n v: value,\n },\n )?;\n }\n map.end()\n }\n}\n\npub mod test_util {\n use super::*;\n\n pub fn seder_roundtrip(value: &Value, typ: &ValueType) -> Result {\n let json_value = serde_json::to_value(value)?;\n let roundtrip_value = Value::from_json(json_value, typ)?;\n Ok(roundtrip_value)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use std::collections::BTreeMap;\n\n #[test]\n fn test_estimated_byte_size_null() {\n let 
value = Value::::Null;\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_primitive() {\n // Test primitives that should have 0 detached byte size\n let value = Value::::Basic(BasicValue::Bool(true));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Int64(42));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Float64(3.14));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_string() {\n let test_str = \"hello world\";\n let value = Value::::Basic(BasicValue::Str(Arc::from(test_str)));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_str.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_bytes() {\n let test_bytes = b\"hello world\";\n let value = Value::::Basic(BasicValue::Bytes(Bytes::from(test_bytes.to_vec())));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_bytes.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_json() {\n let json_val = serde_json::json!({\"key\": \"value\", \"number\": 42});\n let value = Value::::Basic(BasicValue::Json(Arc::from(json_val)));\n let size = value.estimated_byte_size();\n\n // Should include the size of the JSON structure\n // The exact size depends on the internal JSON representation\n assert!(size > std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_vector() {\n let vec_elements = vec![\n BasicValue::Str(Arc::from(\"hello\")),\n BasicValue::Str(Arc::from(\"world\")),\n BasicValue::Int64(42),\n ];\n let value = Value::::Basic(BasicValue::Vector(Arc::from(vec_elements)));\n let size = 
value.estimated_byte_size();\n\n // Should include the size of the vector elements\n let expected_min_size = std::mem::size_of::>()\n + \"hello\".len()\n + \"world\".len()\n + 3 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_struct() {\n let fields = vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"test\"))),\n Value::::Basic(BasicValue::Int64(123)),\n ];\n let field_values = FieldValues { fields };\n let value = Value::::Struct(field_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"test\".len()\n + 2 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_utable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item2\",\n )))],\n }),\n ];\n let value = Value::::UTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"item1\".len()\n + \"item2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ltable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list2\",\n )))],\n }),\n ];\n let value = Value::::LTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"list1\".len()\n + \"list2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ktable() {\n let mut map = BTreeMap::new();\n map.insert(\n KeyValue::Str(Arc::from(\"key1\")),\n ScopeValue(FieldValues {\n fields: 
vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value1\",\n )))],\n }),\n );\n map.insert(\n KeyValue::Str(Arc::from(\"key2\")),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value2\",\n )))],\n }),\n );\n let value = Value::::KTable(map);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"key1\".len()\n + \"key2\".len()\n + \"value1\".len()\n + \"value2\".len()\n + 2 * std::mem::size_of::<(String, ScopeValue)>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_nested_struct() {\n let inner_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"inner\"))),\n Value::::Basic(BasicValue::Int64(456)),\n ],\n });\n\n let outer_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"outer\"))),\n inner_struct,\n ],\n });\n\n let size = outer_struct.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"outer\".len()\n + \"inner\".len()\n + 4 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_empty_collections() {\n // Empty UTable\n let value = Value::::UTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty LTable\n let value = Value::::LTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty KTable\n let value = Value::::KTable(BTreeMap::new());\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty Struct\n let value = Value::::Struct(FieldValues { fields: vec![] });\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n}\n"], ["/cocoindex/src/llm/gemini.rs", "use crate::prelude::*;\n\nuse crate::llm::{\n LlmEmbeddingClient, LlmGenerateRequest, LlmGenerateResponse, 
LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse base64::prelude::*;\nuse google_cloud_aiplatform_v1 as vertexai;\nuse serde_json::Value;\nuse urlencoding::encode;\n\nfn get_embedding_dimension(model: &str) -> Option {\n let model = model.to_ascii_lowercase();\n if model.starts_with(\"gemini-embedding-\") {\n Some(3072)\n } else if model.starts_with(\"text-embedding-\") {\n Some(768)\n } else if model.starts_with(\"embedding-\") {\n Some(768)\n } else if model.starts_with(\"text-multilingual-embedding-\") {\n Some(768)\n } else {\n None\n }\n}\n\npub struct AiStudioClient {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl AiStudioClient {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Gemini doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"GEMINI_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"GEMINI_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n// Recursively remove all `additionalProperties` fields from a JSON value\nfn remove_additional_properties(value: &mut Value) {\n match value {\n Value::Object(map) => {\n map.remove(\"additionalProperties\");\n for v in map.values_mut() {\n remove_additional_properties(v);\n }\n }\n Value::Array(arr) => {\n for v in arr {\n remove_additional_properties(v);\n }\n }\n _ => {}\n }\n}\n\nimpl AiStudioClient {\n fn get_api_url(&self, model: &str, api_name: &str) -> String {\n format!(\n \"https://generativelanguage.googleapis.com/v1beta/models/{}:{}?key={}\",\n encode(model),\n api_name,\n encode(&self.api_key)\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for AiStudioClient {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_parts: Vec = Vec::new();\n\n // Add text part first\n user_parts.push(serde_json::json!({ \"text\": request.user_prompt }));\n\n // Add image part 
if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_parts.push(serde_json::json!({\n \"inlineData\": {\n \"mimeType\": mime_type,\n \"data\": base64_image\n }\n }));\n }\n\n // Compose the contents\n let contents = vec![serde_json::json!({\n \"role\": \"user\",\n \"parts\": user_parts\n })];\n\n // Prepare payload\n let mut payload = serde_json::json!({ \"contents\": contents });\n if let Some(system) = request.system_prompt {\n payload[\"systemInstruction\"] = serde_json::json!({\n \"parts\": [ { \"text\": system } ]\n });\n }\n\n // If structured output is requested, add schema and responseMimeType\n if let Some(OutputFormat::JsonSchema { schema, .. }) = &request.output_format {\n let mut schema_json = serde_json::to_value(schema)?;\n remove_additional_properties(&mut schema_json);\n payload[\"generationConfig\"] = serde_json::json!({\n \"responseMimeType\": \"application/json\",\n \"responseSchema\": schema_json\n });\n }\n\n let url = self.get_api_url(request.model, \"generateContent\");\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let resp_json: Value = resp.json().await.context(\"Invalid JSON\")?;\n\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Gemini API error: {:?}\", error);\n }\n let mut resp_json = resp_json;\n let text = match &mut resp_json[\"candidates\"][0][\"content\"][\"parts\"][0][\"text\"] {\n Value::String(s) => std::mem::take(s),\n _ => bail!(\"No text in response\"),\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: 
false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[derive(Deserialize)]\nstruct ContentEmbedding {\n values: Vec,\n}\n#[derive(Deserialize)]\nstruct EmbedContentResponse {\n embedding: ContentEmbedding,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for AiStudioClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = self.get_api_url(request.model, \"embedContent\");\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"content\": { \"parts\": [{ \"text\": request.text }] },\n });\n if let Some(task_type) = request.task_type {\n payload[\"taskType\"] = serde_json::Value::String(task_type.into());\n }\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: EmbedContentResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n\npub struct VertexAiClient {\n client: vertexai::client::PredictionService,\n config: super::VertexAiConfig,\n}\n\nimpl VertexAiClient {\n pub async fn new(\n address: Option,\n api_config: Option,\n ) -> Result {\n if address.is_some() {\n api_bail!(\"VertexAi API address is not supported for VertexAi API type\");\n }\n let Some(super::LlmApiConfig::VertexAi(config)) = api_config else {\n api_bail!(\"VertexAi API config is required for VertexAi API type\");\n };\n let client = vertexai::client::PredictionService::builder()\n .build()\n .await?;\n Ok(Self { client, config })\n }\n\n fn get_model_path(&self, model: &str) -> String {\n format!(\n \"projects/{}/locations/{}/publishers/google/models/{}\",\n self.config.project,\n 
self.config.region.as_deref().unwrap_or(\"global\"),\n model\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for VertexAiClient {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n use vertexai::model::{Blob, Content, GenerationConfig, Part, Schema, part::Data};\n\n // Compose parts\n let mut parts = Vec::new();\n // Add text part\n parts.push(Part::new().set_text(request.user_prompt.to_string()));\n // Add image part if present\n if let Some(image_bytes) = request.image {\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n parts.push(\n Part::new().set_inline_data(\n Blob::new()\n .set_data(image_bytes.into_owned())\n .set_mime_type(mime_type.to_string()),\n ),\n );\n }\n // Compose content\n let mut contents = Vec::new();\n contents.push(Content::new().set_role(\"user\".to_string()).set_parts(parts));\n // Compose system instruction if present\n let system_instruction = request.system_prompt.as_ref().map(|sys| {\n Content::new()\n .set_role(\"system\".to_string())\n .set_parts(vec![Part::new().set_text(sys.to_string())])\n });\n\n // Compose generation config\n let mut generation_config = None;\n if let Some(OutputFormat::JsonSchema { schema, .. 
}) = &request.output_format {\n let schema_json = serde_json::to_value(schema)?;\n generation_config = Some(\n GenerationConfig::new()\n .set_response_mime_type(\"application/json\".to_string())\n .set_response_schema(serde_json::from_value::(schema_json)?),\n );\n }\n\n let mut req = self\n .client\n .generate_content()\n .set_model(self.get_model_path(request.model))\n .set_contents(contents);\n if let Some(sys) = system_instruction {\n req = req.set_system_instruction(sys);\n }\n if let Some(config) = generation_config {\n req = req.set_generation_config(config);\n }\n\n // Call the API\n let resp = req.send().await?;\n // Extract text from response\n let Some(Data::Text(text)) = resp\n .candidates\n .into_iter()\n .next()\n .and_then(|c| c.content)\n .and_then(|content| content.parts.into_iter().next())\n .and_then(|part| part.data)\n else {\n bail!(\"No text in response\");\n };\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for VertexAiClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n // Create the instances for the request\n let mut instance = serde_json::json!({\n \"content\": request.text\n });\n // Add task type if specified\n if let Some(task_type) = &request.task_type {\n instance[\"task_type\"] = serde_json::Value::String(task_type.to_string());\n }\n\n let instances = vec![instance];\n\n // Prepare the request parameters\n let mut parameters = serde_json::json!({});\n if let Some(output_dimension) = request.output_dimension {\n parameters[\"outputDimensionality\"] = serde_json::Value::Number(output_dimension.into());\n }\n\n // Build the prediction request using the raw predict builder\n let response = self\n .client\n .predict()\n 
.set_endpoint(self.get_model_path(request.model))\n .set_instances(instances)\n .set_parameters(parameters)\n .send()\n .await?;\n\n // Extract the embedding from the response\n let embeddings = response\n .predictions\n .into_iter()\n .next()\n .and_then(|mut e| e.get_mut(\"embeddings\").map(|v| v.take()))\n .ok_or_else(|| anyhow::anyhow!(\"No embeddings in response\"))?;\n let embedding: ContentEmbedding = serde_json::from_value(embeddings)?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/execution/live_updater.rs", "use crate::{execution::stats::UpdateStats, prelude::*};\n\nuse super::stats;\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse tokio::{sync::watch, task::JoinSet, time::MissedTickBehavior};\n\npub struct FlowLiveUpdaterUpdates {\n pub active_sources: Vec,\n pub updated_sources: Vec,\n}\nstruct FlowLiveUpdaterStatus {\n pub active_source_idx: BTreeSet,\n pub source_updates_num: Vec,\n}\n\nstruct UpdateReceiveState {\n status_rx: watch::Receiver,\n last_num_source_updates: Vec,\n is_done: bool,\n}\n\npub struct FlowLiveUpdater {\n flow_ctx: Arc,\n join_set: Mutex>>>,\n stats_per_task: Vec>,\n recv_state: tokio::sync::Mutex,\n num_remaining_tasks_rx: watch::Receiver,\n\n // Hold tx to avoid dropping the sender.\n _status_tx: watch::Sender,\n _num_remaining_tasks_tx: watch::Sender,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct FlowLiveUpdaterOptions {\n /// If true, the updater will keep refreshing the index.\n /// Otherwise, it will only apply changes from the source up to the current time.\n pub live_mode: bool,\n\n /// If true, stats will be printed to the console.\n pub print_stats: bool,\n}\n\nconst REPORT_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);\n\nstruct SharedAckFn Result<()>> {\n count: usize,\n ack_fn: Option,\n}\n\nimpl 
Result<()>> SharedAckFn {\n fn new(count: usize, ack_fn: AckAsyncFn) -> Self {\n Self {\n count,\n ack_fn: Some(ack_fn),\n }\n }\n\n async fn ack(v: &Mutex) -> Result<()> {\n let ack_fn = {\n let mut v = v.lock().unwrap();\n v.count -= 1;\n if v.count > 0 { None } else { v.ack_fn.take() }\n };\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n Ok(())\n }\n}\n\nstruct SourceUpdateTask {\n source_idx: usize,\n\n flow: Arc,\n plan: Arc,\n execution_ctx: Arc>,\n source_update_stats: Arc,\n pool: PgPool,\n options: FlowLiveUpdaterOptions,\n\n status_tx: watch::Sender,\n num_remaining_tasks_tx: watch::Sender,\n}\n\nimpl Drop for SourceUpdateTask {\n fn drop(&mut self) {\n self.status_tx.send_modify(|update| {\n update.active_source_idx.remove(&self.source_idx);\n });\n self.num_remaining_tasks_tx.send_modify(|update| {\n *update -= 1;\n });\n }\n}\n\nimpl SourceUpdateTask {\n async fn run(self) -> Result<()> {\n let source_idx = self.source_idx;\n let source_context = self\n .execution_ctx\n .get_source_indexing_context(&self.flow, source_idx, &self.pool)\n .await?;\n\n let import_op = &self.plan.import_ops[source_idx];\n\n let report_stats = |stats: &stats::UpdateStats, kind: &str| {\n self.source_update_stats.merge(stats);\n if self.options.print_stats {\n println!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n } else {\n trace!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n }\n };\n\n let mut futs: Vec>> = Vec::new();\n\n // Deal with change streams.\n if self.options.live_mode {\n if let Some(change_stream) = import_op.executor.change_stream().await? 
{\n let change_stream_stats = Arc::new(stats::UpdateStats::default());\n futs.push(\n {\n let change_stream_stats = change_stream_stats.clone();\n let pool = self.pool.clone();\n let status_tx = self.status_tx.clone();\n async move {\n let mut change_stream = change_stream;\n let retry_options = retryable::RetryOptions {\n max_retries: None,\n initial_backoff: std::time::Duration::from_secs(5),\n max_backoff: std::time::Duration::from_secs(60),\n };\n loop {\n // Workaround as AsyncFnMut isn't mature yet.\n // Should be changed to use AsyncFnMut once it is.\n let change_stream = tokio::sync::Mutex::new(&mut change_stream);\n let change_msg = retryable::run(\n || async {\n let mut change_stream = change_stream.lock().await;\n change_stream\n .next()\n .await\n .transpose()\n .map_err(retryable::Error::always_retryable)\n },\n &retry_options,\n )\n .await?;\n let change_msg = if let Some(change_msg) = change_msg {\n change_msg\n } else {\n break;\n };\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n let ack_fn = {\n let status_tx = status_tx.clone();\n let update_stats = update_stats.clone();\n let change_stream_stats = change_stream_stats.clone();\n async move || {\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n change_stream_stats.merge(&update_stats);\n }\n if let Some(ack_fn) = change_msg.ack_fn {\n ack_fn().await\n } else {\n Ok(())\n }\n }\n };\n let shared_ack_fn = Arc::new(Mutex::new(SharedAckFn::new(\n change_msg.changes.iter().len(),\n ack_fn,\n )));\n for change in change_msg.changes {\n let shared_ack_fn = shared_ack_fn.clone();\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n tokio::spawn(source_context.clone().process_source_key(\n change.key,\n change.data,\n update_stats.clone(),\n concur_permit,\n Some(move || async move {\n SharedAckFn::ack(&shared_ack_fn).await\n }),\n pool.clone(),\n 
));\n }\n }\n Ok(())\n }\n }\n .boxed(),\n );\n\n futs.push(\n async move {\n let mut interval = tokio::time::interval(REPORT_INTERVAL);\n let mut last_change_stream_stats: UpdateStats =\n change_stream_stats.as_ref().clone();\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n let curr_change_stream_stats = change_stream_stats.as_ref().clone();\n let delta = curr_change_stream_stats.delta(&last_change_stream_stats);\n if delta.has_any_change() {\n report_stats(&delta, \"change stream\");\n last_change_stream_stats = curr_change_stream_stats;\n }\n }\n }\n .boxed(),\n );\n }\n }\n\n // The main update loop.\n futs.push({\n let status_tx = self.status_tx.clone();\n let pool = self.pool.clone();\n let live_mode = self.options.live_mode;\n async move {\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"batch update\");\n\n if let (true, Some(refresh_interval)) =\n (live_mode, import_op.refresh_options.refresh_interval)\n {\n let mut interval = tokio::time::interval(refresh_interval);\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"interval refresh\");\n }\n }\n Ok(())\n }\n .boxed()\n });\n\n let join_result = try_join_all(futs).await;\n if let Err(err) = join_result {\n error!(\"Error in source `{}`: {:?}\", import_op.name, err);\n return Err(err);\n }\n Ok(())\n }\n}\n\nimpl FlowLiveUpdater {\n pub async fn start(\n flow_ctx: 
Arc,\n pool: &PgPool,\n options: FlowLiveUpdaterOptions,\n ) -> Result {\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let execution_ctx = Arc::new(flow_ctx.use_owned_execution_ctx().await?);\n\n let (status_tx, status_rx) = watch::channel(FlowLiveUpdaterStatus {\n active_source_idx: BTreeSet::from_iter(0..plan.import_ops.len()),\n source_updates_num: vec![0; plan.import_ops.len()],\n });\n\n let (num_remaining_tasks_tx, num_remaining_tasks_rx) =\n watch::channel(plan.import_ops.len());\n\n let mut join_set = JoinSet::new();\n let mut stats_per_task = Vec::new();\n\n for source_idx in 0..plan.import_ops.len() {\n let source_update_stats = Arc::new(stats::UpdateStats::default());\n let source_update_task = SourceUpdateTask {\n source_idx,\n flow: flow_ctx.flow.clone(),\n plan: plan.clone(),\n execution_ctx: execution_ctx.clone(),\n source_update_stats: source_update_stats.clone(),\n pool: pool.clone(),\n options: options.clone(),\n status_tx: status_tx.clone(),\n num_remaining_tasks_tx: num_remaining_tasks_tx.clone(),\n };\n join_set.spawn(source_update_task.run());\n stats_per_task.push(source_update_stats);\n }\n Ok(Self {\n flow_ctx,\n join_set: Mutex::new(Some(join_set)),\n stats_per_task,\n recv_state: tokio::sync::Mutex::new(UpdateReceiveState {\n status_rx,\n last_num_source_updates: vec![0; plan.import_ops.len()],\n is_done: false,\n }),\n num_remaining_tasks_rx,\n\n _status_tx: status_tx,\n _num_remaining_tasks_tx: num_remaining_tasks_tx,\n })\n }\n\n pub async fn wait(&self) -> Result<()> {\n {\n let mut rx = self.num_remaining_tasks_rx.clone();\n rx.wait_for(|v| *v == 0).await?;\n }\n\n let Some(mut join_set) = self.join_set.lock().unwrap().take() else {\n return Ok(());\n };\n while let Some(task_result) = join_set.join_next().await {\n match task_result {\n Ok(Ok(_)) => {}\n Ok(Err(err)) => {\n return Err(err);\n }\n Err(err) if err.is_cancelled() => {}\n Err(err) => {\n return Err(err.into());\n }\n }\n }\n Ok(())\n }\n\n pub fn 
abort(&self) {\n let mut join_set = self.join_set.lock().unwrap();\n if let Some(join_set) = &mut *join_set {\n join_set.abort_all();\n }\n }\n\n pub fn index_update_info(&self) -> stats::IndexUpdateInfo {\n stats::IndexUpdateInfo {\n sources: std::iter::zip(\n self.flow_ctx.flow.flow_instance.import_ops.iter(),\n self.stats_per_task.iter(),\n )\n .map(|(import_op, stats)| stats::SourceUpdateInfo {\n source_name: import_op.name.clone(),\n stats: stats.as_ref().clone(),\n })\n .collect(),\n }\n }\n\n pub async fn next_status_updates(&self) -> Result {\n let mut recv_state = self.recv_state.lock().await;\n let recv_state = &mut *recv_state;\n\n if recv_state.is_done {\n return Ok(FlowLiveUpdaterUpdates {\n active_sources: vec![],\n updated_sources: vec![],\n });\n }\n\n recv_state.status_rx.changed().await?;\n let status = recv_state.status_rx.borrow_and_update();\n let updates = FlowLiveUpdaterUpdates {\n active_sources: status\n .active_source_idx\n .iter()\n .map(|idx| {\n self.flow_ctx.flow.flow_instance.import_ops[*idx]\n .name\n .clone()\n })\n .collect(),\n updated_sources: status\n .source_updates_num\n .iter()\n .enumerate()\n .filter_map(|(idx, num_updates)| {\n if num_updates > &recv_state.last_num_source_updates[idx] {\n Some(\n self.flow_ctx.flow.flow_instance.import_ops[idx]\n .name\n .clone(),\n )\n } else {\n None\n }\n })\n .collect(),\n };\n recv_state.last_num_source_updates = status.source_updates_num.clone();\n if status.active_source_idx.is_empty() {\n recv_state.is_done = true;\n }\n Ok(updates)\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/property_graph.rs", "use crate::prelude::*;\n\nuse crate::ops::sdk::{AuthEntryReference, FieldSchema};\n\n#[derive(Debug, Deserialize)]\npub struct TargetFieldMapping {\n pub source: spec::FieldName,\n\n /// Field name for the node in the Knowledge Graph.\n /// If unspecified, it's the same as `field_name`.\n #[serde(default)]\n pub target: Option,\n}\n\nimpl TargetFieldMapping {\n pub fn get_target(&self) 
-> &spec::FieldName {\n self.target.as_ref().unwrap_or(&self.source)\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodeFromFieldsSpec {\n pub label: String,\n pub fields: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodesSpec {\n pub label: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct RelationshipsSpec {\n pub rel_type: String,\n pub source: NodeFromFieldsSpec,\n pub target: NodeFromFieldsSpec,\n}\n\n#[derive(Debug, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum GraphElementMapping {\n Relationship(RelationshipsSpec),\n Node(NodesSpec),\n}\n\n#[derive(Debug, Deserialize)]\npub struct GraphDeclaration {\n pub nodes_label: String,\n\n #[serde(flatten)]\n pub index_options: spec::IndexOptions,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)]\npub enum ElementType {\n Node(String),\n Relationship(String),\n}\n\nimpl ElementType {\n pub fn label(&self) -> &str {\n match self {\n ElementType::Node(label) => label,\n ElementType::Relationship(label) => label,\n }\n }\n\n pub fn from_mapping_spec(spec: &GraphElementMapping) -> Self {\n match spec {\n GraphElementMapping::Relationship(spec) => {\n ElementType::Relationship(spec.rel_type.clone())\n }\n GraphElementMapping::Node(spec) => ElementType::Node(spec.label.clone()),\n }\n }\n\n pub fn matcher(&self, var_name: &str) -> String {\n match self {\n ElementType::Relationship(label) => format!(\"()-[{var_name}:{label}]->()\"),\n ElementType::Node(label) => format!(\"({var_name}:{label})\"),\n }\n }\n}\n\nimpl std::fmt::Display for ElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ElementType::Node(label) => write!(f, \"Node(label:{label})\"),\n ElementType::Relationship(rel_type) => write!(f, \"Relationship(type:{rel_type})\"),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Derivative)]\n#[derivative(\n Clone(bound = \"\"),\n PartialEq(bound = \"\"),\n Eq(bound = \"\"),\n Hash(bound = \"\")\n)]\npub struct 
GraphElementType {\n #[serde(bound = \"\")]\n pub connection: AuthEntryReference,\n pub typ: ElementType,\n}\n\nimpl std::fmt::Display for GraphElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}/{}\", self.connection.key, self.typ)\n }\n}\n\npub struct GraphElementSchema {\n pub elem_type: ElementType,\n pub key_fields: Vec,\n pub value_fields: Vec,\n}\n\npub struct GraphElementInputFieldsIdx {\n pub key: Vec,\n pub value: Vec,\n}\n\nimpl GraphElementInputFieldsIdx {\n pub fn extract_key(&self, fields: &[value::Value]) -> Result {\n value::KeyValue::from_values(self.key.iter().map(|idx| &fields[*idx]))\n }\n}\n\npub struct AnalyzedGraphElementFieldMapping {\n pub schema: Arc,\n pub fields_input_idx: GraphElementInputFieldsIdx,\n}\n\nimpl AnalyzedGraphElementFieldMapping {\n pub fn has_value_fields(&self) -> bool {\n !self.fields_input_idx.value.is_empty()\n }\n}\n\npub struct AnalyzedRelationshipInfo {\n pub source: AnalyzedGraphElementFieldMapping,\n pub target: AnalyzedGraphElementFieldMapping,\n}\n\npub struct AnalyzedDataCollection {\n pub schema: Arc,\n pub value_fields_input_idx: Vec,\n\n pub rel: Option,\n}\n\nimpl AnalyzedDataCollection {\n pub fn dependent_node_labels(&self) -> IndexSet<&str> {\n let mut dependent_node_labels = IndexSet::new();\n if let Some(rel) = &self.rel {\n dependent_node_labels.insert(rel.source.schema.elem_type.label());\n dependent_node_labels.insert(rel.target.schema.elem_type.label());\n }\n dependent_node_labels\n }\n}\n\nstruct GraphElementSchemaBuilder {\n elem_type: ElementType,\n key_fields: Vec,\n value_fields: Vec,\n}\n\nimpl GraphElementSchemaBuilder {\n fn new(elem_type: ElementType) -> Self {\n Self {\n elem_type,\n key_fields: vec![],\n value_fields: vec![],\n }\n }\n\n fn merge_fields(\n elem_type: &ElementType,\n kind: &str,\n existing_fields: &mut Vec,\n fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result> {\n if fields.is_empty() {\n return Ok(vec![]);\n }\n 
let result: Vec = if existing_fields.is_empty() {\n let fields_idx: Vec = fields.iter().map(|(idx, _)| *idx).collect();\n existing_fields.extend(fields.into_iter().map(|(_, f)| f));\n fields_idx\n } else {\n if existing_fields.len() != fields.len() {\n bail!(\n \"{elem_type} {kind} fields number mismatch: {} vs {}\",\n existing_fields.len(),\n fields.len()\n );\n }\n let mut fields_map: HashMap<_, _> = fields\n .into_iter()\n .map(|(idx, schema)| (schema.name, (idx, schema.value_type)))\n .collect();\n // Follow the order of existing fields\n existing_fields\n .iter()\n .map(|existing_field| {\n let (idx, typ) = fields_map.remove(&existing_field.name).ok_or_else(|| {\n anyhow!(\n \"{elem_type} {kind} field `{}` not found in some collector\",\n existing_field.name\n )\n })?;\n if typ != existing_field.value_type {\n bail!(\n \"{elem_type} {kind} field `{}` type mismatch: {} vs {}\",\n existing_field.name,\n typ,\n existing_field.value_type\n )\n }\n Ok(idx)\n })\n .collect::>>()?\n };\n Ok(result)\n }\n\n fn merge(\n &mut self,\n key_fields: Vec<(usize, schema::FieldSchema)>,\n value_fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result {\n let key_fields_idx =\n Self::merge_fields(&self.elem_type, \"key\", &mut self.key_fields, key_fields)?;\n let value_fields_idx = Self::merge_fields(\n &self.elem_type,\n \"value\",\n &mut self.value_fields,\n value_fields,\n )?;\n Ok(GraphElementInputFieldsIdx {\n key: key_fields_idx,\n value: value_fields_idx,\n })\n }\n\n fn build_schema(self) -> Result {\n if self.key_fields.is_empty() {\n bail!(\n \"No key fields specified for Node label `{}`\",\n self.elem_type\n );\n }\n Ok(GraphElementSchema {\n elem_type: self.elem_type,\n key_fields: self.key_fields,\n value_fields: self.value_fields,\n })\n }\n}\nstruct DependentNodeLabelAnalyzer<'a, AuthEntry> {\n graph_elem_type: GraphElementType,\n fields: IndexMap,\n remaining_fields: HashMap<&'a str, &'a TargetFieldMapping>,\n primary_key_fields: &'a [String],\n}\n\nimpl<'a, 
AuthEntry> DependentNodeLabelAnalyzer<'a, AuthEntry> {\n fn new(\n conn: &'a spec::AuthEntryReference,\n rel_end_spec: &'a NodeFromFieldsSpec,\n primary_key_fields_map: &'a HashMap<&'a GraphElementType, &'a [String]>,\n ) -> Result {\n let graph_elem_type = GraphElementType {\n connection: conn.clone(),\n typ: ElementType::Node(rel_end_spec.label.clone()),\n };\n let primary_key_fields = primary_key_fields_map\n .get(&graph_elem_type)\n .ok_or_else(invariance_violation)?;\n Ok(Self {\n graph_elem_type,\n fields: IndexMap::new(),\n remaining_fields: rel_end_spec\n .fields\n .iter()\n .map(|f| (f.source.as_str(), f))\n .collect(),\n primary_key_fields,\n })\n }\n\n fn process_field(&mut self, field_idx: usize, field_schema: &schema::FieldSchema) -> bool {\n let field_mapping = match self.remaining_fields.remove(field_schema.name.as_str()) {\n Some(field_mapping) => field_mapping,\n None => return false,\n };\n self.fields.insert(\n field_mapping.get_target().clone(),\n (field_idx, field_schema.value_type.clone()),\n );\n true\n }\n\n fn build(\n self,\n schema_builders: &mut HashMap, GraphElementSchemaBuilder>,\n ) -> Result<(GraphElementType, GraphElementInputFieldsIdx)> {\n if !self.remaining_fields.is_empty() {\n anyhow::bail!(\n \"Fields not mapped for {}: {}\",\n self.graph_elem_type,\n self.remaining_fields.keys().join(\", \")\n );\n }\n\n let (mut key_fields, value_fields): (Vec<_>, Vec<_>) = self\n .fields\n .into_iter()\n .map(|(field_name, (idx, typ))| (idx, FieldSchema::new(field_name, typ)))\n .partition(|(_, f)| self.primary_key_fields.contains(&f.name));\n if key_fields.len() != self.primary_key_fields.len() {\n bail!(\n \"Primary key fields number mismatch: {} vs {}\",\n key_fields.iter().map(|(_, f)| &f.name).join(\", \"),\n self.primary_key_fields.iter().join(\", \")\n );\n }\n key_fields.sort_by_key(|(_, f)| {\n self.primary_key_fields\n .iter()\n .position(|k| k == &f.name)\n .unwrap()\n });\n\n let fields_idx = schema_builders\n 
.entry(self.graph_elem_type.clone())\n .or_insert_with(|| GraphElementSchemaBuilder::new(self.graph_elem_type.typ.clone()))\n .merge(key_fields, value_fields)?;\n Ok((self.graph_elem_type, fields_idx))\n }\n}\n\npub struct DataCollectionGraphMappingInput<'a, AuthEntry> {\n pub auth_ref: &'a spec::AuthEntryReference,\n pub mapping: &'a GraphElementMapping,\n pub index_options: &'a spec::IndexOptions,\n\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n}\n\npub fn analyze_graph_mappings<'a, AuthEntry: 'a>(\n data_coll_inputs: impl Iterator>,\n declarations: impl Iterator<\n Item = (\n &'a spec::AuthEntryReference,\n &'a GraphDeclaration,\n ),\n >,\n) -> Result<(Vec, Vec>)> {\n let data_coll_inputs: Vec<_> = data_coll_inputs.collect();\n let decls: Vec<_> = declarations.collect();\n\n // 1a. Prepare graph element types\n let graph_elem_types = data_coll_inputs\n .iter()\n .map(|d| GraphElementType {\n connection: d.auth_ref.clone(),\n typ: ElementType::from_mapping_spec(d.mapping),\n })\n .collect::>();\n let decl_graph_elem_types = decls\n .iter()\n .map(|(auth_ref, decl)| GraphElementType {\n connection: (*auth_ref).clone(),\n typ: ElementType::Node(decl.nodes_label.clone()),\n })\n .collect::>();\n\n // 1b. Prepare primary key fields map\n let primary_key_fields_map: HashMap<&GraphElementType, &[spec::FieldName]> =\n std::iter::zip(data_coll_inputs.iter(), graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| {\n (\n graph_elem_type,\n data_coll_input.index_options.primary_key_fields(),\n )\n })\n .chain(\n std::iter::zip(decl_graph_elem_types.iter(), decls.iter()).map(\n |(graph_elem_type, (_, decl))| {\n (graph_elem_type, decl.index_options.primary_key_fields())\n },\n ),\n )\n .map(|(graph_elem_type, primary_key_fields)| {\n Ok((\n graph_elem_type,\n primary_key_fields.with_context(|| {\n format!(\"Primary key fields are not set for {graph_elem_type}\")\n })?,\n ))\n })\n .collect::>()?;\n\n // 2. 
Analyze data collection graph mappings and build target schema\n let mut node_schema_builders =\n HashMap::, GraphElementSchemaBuilder>::new();\n struct RelationshipProcessedInfo {\n rel_schema: GraphElementSchema,\n source_typ: GraphElementType,\n source_fields_idx: GraphElementInputFieldsIdx,\n target_typ: GraphElementType,\n target_fields_idx: GraphElementInputFieldsIdx,\n }\n struct DataCollectionProcessedInfo {\n value_input_fields_idx: Vec,\n rel_specific: Option>,\n }\n let data_collection_processed_info = std::iter::zip(data_coll_inputs, graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| -> Result<_> {\n let processed_info = match data_coll_input.mapping {\n GraphElementMapping::Node(_) => {\n let input_fields_idx = node_schema_builders\n .entry(graph_elem_type.clone())\n .or_insert_with_key(|graph_elem| {\n GraphElementSchemaBuilder::new(graph_elem.typ.clone())\n })\n .merge(\n data_coll_input\n .key_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n data_coll_input\n .value_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n )?;\n\n if !(0..input_fields_idx.key.len()).eq(input_fields_idx.key.into_iter()) {\n return Err(invariance_violation());\n }\n DataCollectionProcessedInfo {\n value_input_fields_idx: input_fields_idx.value,\n rel_specific: None,\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let mut src_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.source,\n &primary_key_fields_map,\n )?;\n let mut tgt_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.target,\n &primary_key_fields_map,\n )?;\n\n let mut value_fields_schema = vec![];\n let mut value_input_fields_idx = vec![];\n for (field_idx, field_schema) in\n data_coll_input.value_fields_schema.into_iter().enumerate()\n {\n if !src_analyzer.process_field(field_idx, &field_schema)\n && !tgt_analyzer.process_field(field_idx, &field_schema)\n {\n 
value_fields_schema.push(field_schema.clone());\n value_input_fields_idx.push(field_idx);\n }\n }\n\n let rel_schema = GraphElementSchema {\n elem_type: graph_elem_type.typ.clone(),\n key_fields: data_coll_input.key_fields_schema,\n value_fields: value_fields_schema,\n };\n let (source_typ, source_fields_idx) =\n src_analyzer.build(&mut node_schema_builders)?;\n let (target_typ, target_fields_idx) =\n tgt_analyzer.build(&mut node_schema_builders)?;\n DataCollectionProcessedInfo {\n value_input_fields_idx,\n rel_specific: Some(RelationshipProcessedInfo {\n rel_schema,\n source_typ,\n source_fields_idx,\n target_typ,\n target_fields_idx,\n }),\n }\n }\n };\n Ok(processed_info)\n })\n .collect::>>()?;\n\n let node_schemas: HashMap, Arc> =\n node_schema_builders\n .into_iter()\n .map(|(graph_elem_type, schema_builder)| {\n Ok((graph_elem_type, Arc::new(schema_builder.build_schema()?)))\n })\n .collect::>()?;\n\n // 3. Build output\n let analyzed_data_colls: Vec =\n std::iter::zip(data_collection_processed_info, graph_elem_types.iter())\n .map(|(processed_info, graph_elem_type)| {\n let result = match processed_info.rel_specific {\n // Node\n None => AnalyzedDataCollection {\n schema: node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone(),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: None,\n },\n // Relationship\n Some(rel_info) => AnalyzedDataCollection {\n schema: Arc::new(rel_info.rel_schema),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: Some(AnalyzedRelationshipInfo {\n source: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.source_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.source_fields_idx,\n },\n target: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.target_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.target_fields_idx,\n },\n }),\n },\n };\n 
Ok(result)\n })\n .collect::>()?;\n let decl_schemas: Vec> = decl_graph_elem_types\n .iter()\n .map(|graph_elem_type| {\n Ok(node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone())\n })\n .collect::>()?;\n Ok((analyzed_data_colls, decl_schemas))\n}\n"], ["/cocoindex/src/ops/factory_bases.rs", "use crate::prelude::*;\nuse crate::setup::ResourceSetupStatus;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse super::interface::*;\nuse super::registry::*;\nuse crate::api_bail;\nuse crate::api_error;\nuse crate::base::schema::*;\nuse crate::base::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\nuse crate::setup;\n// SourceFactoryBase\npub struct ResolvedOpArg {\n pub name: String,\n pub typ: EnrichedValueType,\n pub idx: usize,\n}\n\npub trait ResolvedOpArgExt: Sized {\n fn expect_type(self, expected_type: &ValueType) -> Result;\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value>;\n fn take_value(&self, args: &mut [value::Value]) -> Result;\n}\n\nimpl ResolvedOpArgExt for ResolvedOpArg {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n if &self.typ.typ != expected_type {\n api_bail!(\n \"Expected argument `{}` to be of type `{}`, got `{}`\",\n self.name,\n expected_type,\n self.typ.typ\n );\n }\n Ok(self)\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(&args[self.idx])\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(std::mem::take(&mut args[self.idx]))\n }\n}\n\nimpl ResolvedOpArgExt for Option {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n self.map(|arg| 
arg.expect_type(expected_type)).transpose()\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n Ok(self\n .as_ref()\n .map(|arg| arg.value(args))\n .transpose()?\n .unwrap_or(&value::Value::Null))\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n Ok(self\n .as_ref()\n .map(|arg| arg.take_value(args))\n .transpose()?\n .unwrap_or(value::Value::Null))\n }\n}\n\npub struct OpArgsResolver<'a> {\n args: &'a [OpArgSchema],\n num_positional_args: usize,\n next_positional_idx: usize,\n remaining_kwargs: HashMap<&'a str, usize>,\n}\n\nimpl<'a> OpArgsResolver<'a> {\n pub fn new(args: &'a [OpArgSchema]) -> Result {\n let mut num_positional_args = 0;\n let mut kwargs = HashMap::new();\n for (idx, arg) in args.iter().enumerate() {\n if let Some(name) = &arg.name.0 {\n kwargs.insert(name.as_str(), idx);\n } else {\n if !kwargs.is_empty() {\n api_bail!(\"Positional arguments must be provided before keyword arguments\");\n }\n num_positional_args += 1;\n }\n }\n Ok(Self {\n args,\n num_positional_args,\n next_positional_idx: 0,\n remaining_kwargs: kwargs,\n })\n }\n\n pub fn next_optional_arg(&mut self, name: &str) -> Result> {\n let idx = if let Some(idx) = self.remaining_kwargs.remove(name) {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\"`{name}` is provided as both positional and keyword arguments\");\n } else {\n Some(idx)\n }\n } else if self.next_positional_idx < self.num_positional_args {\n let idx = self.next_positional_idx;\n self.next_positional_idx += 1;\n Some(idx)\n } else {\n None\n };\n Ok(idx.map(|idx| ResolvedOpArg {\n name: name.to_string(),\n typ: self.args[idx].value_type.clone(),\n idx,\n }))\n }\n\n pub fn next_arg(&mut self, name: &str) -> Result {\n Ok(self\n .next_optional_arg(name)?\n .ok_or_else(|| api_error!(\"Required argument `{name}` is missing\",))?)\n }\n\n pub fn done(self) -> Result<()> {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\n 
\"Expected {} positional arguments, got {}\",\n self.next_positional_idx,\n self.num_positional_args\n );\n }\n if !self.remaining_kwargs.is_empty() {\n api_bail!(\n \"Unexpected keyword arguments: {}\",\n self.remaining_kwargs\n .keys()\n .map(|k| format!(\"`{k}`\"))\n .collect::>()\n .join(\", \")\n )\n }\n Ok(())\n }\n\n pub fn get_analyze_value(&self, resolved_arg: &ResolvedOpArg) -> &AnalyzedValueMapping {\n &self.args[resolved_arg.idx].analyzed_value\n }\n}\n\n#[async_trait]\npub trait SourceFactoryBase: SourceFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn get_output_schema(\n &self,\n spec: &Self::Spec,\n context: &FlowInstanceContext,\n ) -> Result;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::Source(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SourceFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let output_schema = self.get_output_schema(&spec, &context).await?;\n let executor = self.build_executor(spec, context);\n Ok((output_schema, executor))\n }\n}\n\n// SimpleFunctionFactoryBase\n\n#[async_trait]\npub trait SimpleFunctionFactoryBase: SimpleFunctionFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type ResolvedArgs: Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Self::Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)>;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n resolved_input_schema: Self::ResolvedArgs,\n 
context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::SimpleFunction(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let mut args_resolver = OpArgsResolver::new(&input_schema)?;\n let (resolved_input_schema, output_schema) = self\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n args_resolver.done()?;\n let executor = self.build_executor(spec, resolved_input_schema, context);\n Ok((output_schema, executor))\n }\n}\n\npub struct TypedExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: F::Key,\n pub desired_setup_state: F::SetupState,\n}\npub struct TypedExportDataCollectionSpec {\n pub name: String,\n pub spec: F::Spec,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\npub struct TypedResourceSetupChangeItem<'a, F: StorageFactoryBase + ?Sized> {\n pub key: F::Key,\n pub setup_status: &'a F::SetupStatus,\n}\n\n#[async_trait]\npub trait StorageFactoryBase: ExportTargetFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type DeclarationSpec: DeserializeOwned + Send + Sync;\n type Key: Debug + Clone + Serialize + DeserializeOwned + Eq + Hash + Send + Sync;\n type SetupState: Debug + Clone + Serialize + DeserializeOwned + Send + Sync;\n type SetupStatus: ResourceSetupStatus;\n type ExportContext: Send + Sync + 'static;\n\n fn name(&self) -> &str;\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Self::Key, Self::SetupState)>,\n )>;\n\n /// Deserialize 
the setup key from a JSON value.\n /// You can override this method to provide a custom deserialization logic, e.g. to perform backward compatible deserialization.\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(serde_json::from_value(key)?)\n }\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: Self::Key,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &Self::SetupState,\n existing_state: &Self::SetupState,\n ) -> Result;\n\n fn describe_resource(&self, key: &Self::Key) -> Result;\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &Self::ExportContext,\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::ExportTarget(Arc::new(self)),\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[async_trait]\nimpl ExportTargetFactory for T {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n let (data_coll_output, decl_output) = StorageFactoryBase::build(\n self,\n data_collections\n .into_iter()\n .map(|d| {\n anyhow::Ok(TypedExportDataCollectionSpec {\n name: d.name,\n spec: serde_json::from_value(d.spec)?,\n key_fields_schema: d.key_fields_schema,\n value_fields_schema: d.value_fields_schema,\n index_options: d.index_options,\n })\n })\n .collect::>>()?,\n declarations\n .into_iter()\n .map(|d| anyhow::Ok(serde_json::from_value(d)?))\n 
.collect::>>()?,\n context,\n )\n .await?;\n\n let data_coll_output = data_coll_output\n .into_iter()\n .map(|d| {\n Ok(interface::ExportDataCollectionBuildOutput {\n export_context: async move {\n Ok(d.export_context.await? as Arc)\n }\n .boxed(),\n setup_key: serde_json::to_value(d.setup_key)?,\n desired_setup_state: serde_json::to_value(d.desired_setup_state)?,\n })\n })\n .collect::>>()?;\n let decl_output = decl_output\n .into_iter()\n .map(|(key, state)| Ok((serde_json::to_value(key)?, serde_json::to_value(state)?)))\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result> {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n let desired_state: Option = desired_state\n .map(|v| serde_json::from_value(v.clone()))\n .transpose()?;\n let existing_states = from_json_combined_state(existing_states)?;\n let setup_status = StorageFactoryBase::check_setup_status(\n self,\n key,\n desired_state,\n existing_states,\n flow_instance_ctx,\n )\n .await?;\n Ok(Box::new(setup_status))\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n StorageFactoryBase::describe_resource(self, &key)\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n Ok(serde_json::to_value(key)?)\n }\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result {\n let result = StorageFactoryBase::check_state_compatibility(\n self,\n &serde_json::from_value(desired_state.clone())?,\n &serde_json::from_value(existing_state.clone())?,\n )?;\n Ok(result)\n }\n\n fn extract_additional_key(\n &self,\n key: &value::KeyValue,\n value: &value::FieldValues,\n export_context: &(dyn Any 
+ Send + Sync),\n ) -> Result {\n StorageFactoryBase::extract_additional_key(\n self,\n key,\n value,\n export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mutations = mutations\n .into_iter()\n .map(|m| {\n anyhow::Ok(ExportTargetMutationWithContext {\n mutation: m.mutation,\n export_context: m\n .export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>()?;\n StorageFactoryBase::apply_mutation(self, mutations).await\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n StorageFactoryBase::apply_setup_changes(\n self,\n setup_status\n .into_iter()\n .map(|item| -> anyhow::Result<_> {\n Ok(TypedResourceSetupChangeItem {\n key: serde_json::from_value(item.key.clone())?,\n setup_status: (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>>()?,\n context,\n )\n .await\n }\n}\nfn from_json_combined_state(\n existing_states: setup::CombinedState,\n) -> Result> {\n Ok(setup::CombinedState {\n current: existing_states\n .current\n .map(|v| serde_json::from_value(v))\n .transpose()?,\n staging: existing_states\n .staging\n .into_iter()\n .map(|v| {\n anyhow::Ok(match v {\n setup::StateChange::Upsert(v) => {\n setup::StateChange::Upsert(serde_json::from_value(v)?)\n }\n setup::StateChange::Delete => setup::StateChange::Delete,\n })\n })\n .collect::>()?,\n legacy_state_key: existing_states.legacy_state_key,\n })\n}\n"], ["/cocoindex/src/execution/evaluator.rs", "use crate::prelude::*;\n\nuse anyhow::{Context, Ok};\nuse futures::future::try_join_all;\n\nuse crate::base::value::EstimatedByteSize;\nuse crate::builder::{AnalyzedTransientFlow, plan::*};\nuse crate::py::IntoPyResult;\nuse crate::{\n base::{schema, value},\n utils::immutable::RefList,\n};\n\nuse super::memoization::{EvaluationMemory, 
EvaluationMemoryOptions, evaluate_with_cell};\n\n#[derive(Debug)]\npub struct ScopeValueBuilder {\n // TODO: Share the same lock for values produced in the same execution scope, for stricter atomicity.\n pub fields: Vec>>,\n}\n\nimpl value::EstimatedByteSize for ScopeValueBuilder {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }\n}\n\nimpl From<&ScopeValueBuilder> for value::ScopeValue {\n fn from(val: &ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .iter()\n .map(|f| value::Value::from_alternative_ref(f.get().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl From for value::ScopeValue {\n fn from(val: ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .into_iter()\n .map(|f| value::Value::from_alternative(f.into_inner().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl ScopeValueBuilder {\n fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize_with(num_fields, OnceLock::new);\n Self { fields }\n }\n\n fn augmented_from(source: &value::ScopeValue, schema: &schema::TableSchema) -> Result {\n let val_index_base = if schema.has_key() { 1 } else { 0 };\n let len = schema.row.fields.len() - val_index_base;\n\n let mut builder = Self::new(len);\n\n let value::ScopeValue(source_fields) = source;\n for ((v, t), r) in source_fields\n .fields\n .iter()\n .zip(schema.row.fields[val_index_base..(val_index_base + len)].iter())\n .zip(&mut builder.fields)\n {\n r.set(augmented_value(v, &t.value_type.typ)?)\n .into_py_result()?;\n }\n Ok(builder)\n }\n}\n\nfn augmented_value(\n val: &value::Value,\n val_type: &schema::ValueType,\n) -> Result> {\n let value = match (val, val_type) {\n (value::Value::Null, _) => value::Value::Null,\n (value::Value::Basic(v), _) => value::Value::Basic(v.clone()),\n (value::Value::Struct(v), schema::ValueType::Struct(t)) 
=> {\n value::Value::Struct(value::FieldValues {\n fields: v\n .fields\n .iter()\n .enumerate()\n .map(|(i, v)| augmented_value(v, &t.fields[i].value_type.typ))\n .collect::>>()?,\n })\n }\n (value::Value::UTable(v), schema::ValueType::Table(t)) => value::Value::UTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (value::Value::KTable(v), schema::ValueType::Table(t)) => value::Value::KTable(\n v.iter()\n .map(|(k, v)| Ok((k.clone(), ScopeValueBuilder::augmented_from(v, t)?)))\n .collect::>>()?,\n ),\n (value::Value::LTable(v), schema::ValueType::Table(t)) => value::Value::LTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (val, _) => bail!(\"Value kind doesn't match the type {val_type}: {val:?}\"),\n };\n Ok(value)\n}\n\nenum ScopeKey<'a> {\n /// For root struct and UTable.\n None,\n /// For KTable row.\n MapKey(&'a value::KeyValue),\n /// For LTable row.\n ListIndex(usize),\n}\n\nimpl<'a> ScopeKey<'a> {\n pub fn key(&self) -> Option> {\n match self {\n ScopeKey::None => None,\n ScopeKey::MapKey(k) => Some(Cow::Borrowed(k)),\n ScopeKey::ListIndex(i) => Some(Cow::Owned(value::KeyValue::Int64(*i as i64))),\n }\n }\n\n pub fn value_field_index_base(&self) -> u32 {\n match *self {\n ScopeKey::None => 0,\n ScopeKey::MapKey(_) => 1,\n ScopeKey::ListIndex(_) => 0,\n }\n }\n}\n\nimpl std::fmt::Display for ScopeKey<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ScopeKey::None => write!(f, \"()\"),\n ScopeKey::MapKey(k) => write!(f, \"{{{k}}}\"),\n ScopeKey::ListIndex(i) => write!(f, \"[{i}]\"),\n }\n }\n}\n\nstruct ScopeEntry<'a> {\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n collected_values: Vec>>,\n}\n\nimpl<'a> ScopeEntry<'a> {\n fn new(\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n analyzed_op_scope: &AnalyzedOpScope,\n ) -> Self {\n let mut 
collected_values = Vec::with_capacity(analyzed_op_scope.collector_len);\n collected_values.resize_with(analyzed_op_scope.collector_len, Default::default);\n\n Self {\n key,\n value,\n schema,\n collected_values,\n }\n }\n\n fn get_local_field_schema<'b>(\n schema: &'b schema::StructSchema,\n indices: &[u32],\n ) -> Result<&'b schema::FieldSchema> {\n let field_idx = indices[0] as usize;\n let field_schema = &schema.fields[field_idx];\n let result = if indices.len() == 1 {\n field_schema\n } else {\n let struct_field_schema = match &field_schema.value_type.typ {\n schema::ValueType::Struct(s) => s,\n _ => bail!(\"Expect struct field\"),\n };\n Self::get_local_field_schema(struct_field_schema, &indices[1..])?\n };\n Ok(result)\n }\n\n fn get_local_key_field<'b>(\n key_val: &'b value::KeyValue,\n indices: &'_ [u32],\n ) -> &'b value::KeyValue {\n if indices.is_empty() {\n key_val\n } else if let value::KeyValue::Struct(fields) = key_val {\n Self::get_local_key_field(&fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_local_field<'b>(\n val: &'b value::Value,\n indices: &'_ [u32],\n ) -> &'b value::Value {\n if indices.is_empty() {\n val\n } else if let value::Value::Struct(fields) = val {\n Self::get_local_field(&fields.fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_value_field_builder(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> &value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n Self::get_local_field(val, &field_ref.fields_idx[1..])\n }\n\n fn get_field(&self, field_ref: &AnalyzedLocalFieldReference) -> value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n if first_index < 
index_base {\n let key_val = self.key.key().unwrap().into_owned();\n let key_part = Self::get_local_key_field(&key_val, &field_ref.fields_idx[1..]);\n key_part.clone().into()\n } else {\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n let val_part = Self::get_local_field(val, &field_ref.fields_idx[1..]);\n value::Value::from_alternative_ref(val_part)\n }\n }\n\n fn get_field_schema(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> Result<&schema::FieldSchema> {\n Ok(Self::get_local_field_schema(\n self.schema,\n &field_ref.fields_idx,\n )?)\n }\n\n fn define_field_w_builder(\n &self,\n output_field: &AnalyzedOpOutput,\n val: value::Value,\n ) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let index_base = self.key.value_field_index_base() as usize;\n self.value.fields[field_index - index_base].set(val).map_err(|_| {\n anyhow!(\"Field {field_index} for scope is already set, violating single-definition rule.\")\n })?;\n Ok(())\n }\n\n fn define_field(&self, output_field: &AnalyzedOpOutput, val: &value::Value) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let field_schema = &self.schema.fields[field_index];\n let val = augmented_value(val, &field_schema.value_type.typ)?;\n self.define_field_w_builder(output_field, val)?;\n Ok(())\n }\n}\n\nfn assemble_value(\n value_mapping: &AnalyzedValueMapping,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n) -> value::Value {\n match value_mapping {\n AnalyzedValueMapping::Constant { value } => value.clone(),\n AnalyzedValueMapping::Field(field_ref) => scoped_entries\n .headn(field_ref.scope_up_level as usize)\n .unwrap()\n .get_field(&field_ref.local),\n AnalyzedValueMapping::Struct(mapping) => {\n let fields = mapping\n .fields\n .iter()\n .map(|f| assemble_value(f, scoped_entries))\n .collect();\n value::Value::Struct(value::FieldValues { fields })\n }\n }\n}\n\nfn assemble_input_values<'a>(\n value_mappings: &'a 
[AnalyzedValueMapping],\n scoped_entries: RefList<'a, &ScopeEntry<'a>>,\n) -> impl Iterator + 'a {\n value_mappings\n .iter()\n .map(move |value_mapping| assemble_value(value_mapping, scoped_entries))\n}\n\nasync fn evaluate_child_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n child_scope_entry: ScopeEntry<'_>,\n concurrency_controller: &concur_control::ConcurrencyController,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let _permit = concurrency_controller\n .acquire(Some(|| {\n child_scope_entry\n .value\n .fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }))\n .await?;\n evaluate_op_scope(op_scope, scoped_entries.prepend(&child_scope_entry), memory)\n .await\n .with_context(|| {\n format!(\n \"Evaluating in scope with key {}\",\n match child_scope_entry.key.key() {\n Some(k) => k.to_string(),\n None => \"()\".to_string(),\n }\n )\n })\n}\n\nasync fn evaluate_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let head_scope = *scoped_entries.head().unwrap();\n for reactive_op in op_scope.reactive_ops.iter() {\n match reactive_op {\n AnalyzedReactiveOp::Transform(op) => {\n let mut input_values = Vec::with_capacity(op.inputs.len());\n input_values\n .extend(assemble_input_values(&op.inputs, scoped_entries).collect::>());\n let output_value_cell = memory.get_cache_entry(\n || {\n Ok(op\n .function_exec_info\n .fingerprinter\n .clone()\n .with(&input_values)?\n .into_fingerprint())\n },\n &op.function_exec_info.output_type,\n /*ttl=*/ None,\n )?;\n let output_value = evaluate_with_cell(output_value_cell.as_ref(), move || {\n op.executor.evaluate(input_values)\n })\n .await\n .with_context(|| format!(\"Evaluating Transform op `{}`\", op.name,))?;\n head_scope.define_field(&op.output, &output_value)?;\n }\n\n AnalyzedReactiveOp::ForEach(op) => {\n let target_field_schema = 
head_scope.get_field_schema(&op.local_field_ref)?;\n let table_schema = match &target_field_schema.value_type.typ {\n schema::ValueType::Table(cs) => cs,\n _ => bail!(\"Expect target field to be a table\"),\n };\n\n let target_field = head_scope.get_value_field_builder(&op.local_field_ref);\n let task_futs = match target_field {\n value::Value::UTable(v) => v\n .iter()\n .map(|item| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::None,\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::KTable(v) => v\n .iter()\n .map(|(k, v)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::MapKey(k),\n v,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::LTable(v) => v\n .iter()\n .enumerate()\n .map(|(i, item)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::ListIndex(i),\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n _ => {\n bail!(\"Target field type is expected to be a table\");\n }\n };\n try_join_all(task_futs)\n .await\n .with_context(|| format!(\"Evaluating ForEach op `{}`\", op.name,))?;\n }\n\n AnalyzedReactiveOp::Collect(op) => {\n let mut field_values = Vec::with_capacity(\n op.input.fields.len() + if op.has_auto_uuid_field { 1 } else { 0 },\n );\n let field_values_iter = assemble_input_values(&op.input.fields, scoped_entries);\n if op.has_auto_uuid_field {\n field_values.push(value::Value::Null);\n field_values.extend(field_values_iter);\n let uuid = memory.next_uuid(\n op.fingerprinter\n .clone()\n .with(&field_values[1..])?\n .into_fingerprint(),\n )?;\n field_values[0] = value::Value::Basic(value::BasicValue::Uuid(uuid));\n } else {\n field_values.extend(field_values_iter);\n };\n let collector_entry = 
scoped_entries\n .headn(op.collector_ref.scope_up_level as usize)\n .ok_or_else(|| anyhow::anyhow!(\"Collector level out of bound\"))?;\n {\n let mut collected_records = collector_entry.collected_values\n [op.collector_ref.local.collector_idx as usize]\n .lock()\n .unwrap();\n collected_records.push(value::FieldValues {\n fields: field_values,\n });\n }\n }\n }\n }\n Ok(())\n}\n\npub struct SourceRowEvaluationContext<'a> {\n pub plan: &'a ExecutionPlan,\n pub import_op: &'a AnalyzedImportOp,\n pub schema: &'a schema::FlowSchema,\n pub key: &'a value::KeyValue,\n pub import_op_idx: usize,\n}\n\n#[derive(Debug)]\npub struct EvaluateSourceEntryOutput {\n pub data_scope: ScopeValueBuilder,\n pub collected_values: Vec>,\n}\n\npub async fn evaluate_source_entry(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_value: value::FieldValues,\n memory: &EvaluationMemory,\n) -> Result {\n let _permit = src_eval_ctx\n .import_op\n .concurrency_controller\n .acquire_bytes_with_reservation(|| source_value.estimated_byte_size())\n .await?;\n let root_schema = &src_eval_ctx.schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &src_eval_ctx.plan.op_scope,\n );\n\n let table_schema = match &root_schema.fields[src_eval_ctx.import_op.output.field_idx as usize]\n .value_type\n .typ\n {\n schema::ValueType::Table(cs) => cs,\n _ => {\n bail!(\"Expect source output to be a table\")\n }\n };\n\n let scope_value =\n ScopeValueBuilder::augmented_from(&value::ScopeValue(source_value), table_schema)?;\n root_scope_entry.define_field_w_builder(\n &src_eval_ctx.import_op.output,\n value::Value::KTable(BTreeMap::from([(src_eval_ctx.key.clone(), scope_value)])),\n )?;\n\n evaluate_op_scope(\n &src_eval_ctx.plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n memory,\n )\n .await?;\n let collected_values = root_scope_entry\n .collected_values\n 
.into_iter()\n .map(|v| v.into_inner().unwrap())\n .collect::>();\n Ok(EvaluateSourceEntryOutput {\n data_scope: root_scope_value,\n collected_values,\n })\n}\n\npub async fn evaluate_transient_flow(\n flow: &AnalyzedTransientFlow,\n input_values: &Vec,\n) -> Result {\n let root_schema = &flow.data_schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &flow.execution_plan.op_scope,\n );\n\n if input_values.len() != flow.execution_plan.input_fields.len() {\n bail!(\n \"Input values length mismatch: expect {}, got {}\",\n flow.execution_plan.input_fields.len(),\n input_values.len()\n );\n }\n for (field, value) in flow.execution_plan.input_fields.iter().zip(input_values) {\n root_scope_entry.define_field(field, value)?;\n }\n let eval_memory = EvaluationMemory::new(\n chrono::Utc::now(),\n None,\n EvaluationMemoryOptions {\n enable_cache: false,\n evaluation_only: true,\n },\n );\n evaluate_op_scope(\n &flow.execution_plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n &eval_memory,\n )\n .await?;\n let output_value = assemble_value(\n &flow.execution_plan.output_value,\n RefList::Nil.prepend(&root_scope_entry),\n );\n Ok(output_value)\n}\n"], ["/cocoindex/src/ops/interface.rs", "use std::time::SystemTime;\n\nuse crate::base::{schema::*, spec::IndexOptions, value::*};\nuse crate::prelude::*;\nuse crate::setup;\nuse chrono::TimeZone;\nuse serde::Serialize;\n\npub struct FlowInstanceContext {\n pub flow_instance_name: String,\n pub auth_registry: Arc,\n pub py_exec_ctx: Option>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Default)]\npub struct Ordinal(pub Option);\n\nimpl Ordinal {\n pub fn unavailable() -> Self {\n Self(None)\n }\n\n pub fn is_available(&self) -> bool {\n self.0.is_some()\n }\n}\n\nimpl From for Option {\n fn from(val: Ordinal) -> Self {\n val.0\n }\n}\n\nimpl TryFrom for Ordinal {\n type Error 
= anyhow::Error;\n\n fn try_from(time: SystemTime) -> Result {\n let duration = time.duration_since(std::time::UNIX_EPOCH)?;\n Ok(Ordinal(Some(duration.as_micros().try_into()?)))\n }\n}\n\nimpl TryFrom> for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: chrono::DateTime) -> Result {\n Ok(Ordinal(Some(time.timestamp_micros())))\n }\n}\n\npub struct PartialSourceRowMetadata {\n pub key: KeyValue,\n pub ordinal: Option,\n}\n\n#[derive(Debug)]\npub enum SourceValue {\n Existence(FieldValues),\n NonExistence,\n}\n\nimpl SourceValue {\n pub fn is_existent(&self) -> bool {\n matches!(self, Self::Existence(_))\n }\n\n pub fn as_optional(&self) -> Option<&FieldValues> {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n\n pub fn into_optional(self) -> Option {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n}\n\npub struct SourceData {\n pub value: SourceValue,\n pub ordinal: Ordinal,\n}\n\npub struct SourceChange {\n pub key: KeyValue,\n\n /// If None, the engine will poll to get the latest existence state and value.\n pub data: Option,\n}\n\npub struct SourceChangeMessage {\n pub changes: Vec,\n pub ack_fn: Option BoxFuture<'static, Result<()>> + Send + Sync>>,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorListOptions {\n pub include_ordinal: bool,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorGetOptions {\n pub include_ordinal: bool,\n pub include_value: bool,\n}\n\n#[derive(Debug)]\npub struct PartialSourceRowData {\n pub value: Option,\n pub ordinal: Option,\n}\n\nimpl TryFrom for SourceData {\n type Error = anyhow::Error;\n\n fn try_from(data: PartialSourceRowData) -> Result {\n Ok(Self {\n value: data\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value is missing\"))?,\n ordinal: data\n .ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is missing\"))?,\n })\n }\n}\n#[async_trait]\npub trait SourceExecutor: Send + Sync {\n /// Get the list 
of keys for the source.\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>>;\n\n // Get the value for the given key.\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result;\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n Ok(None)\n }\n}\n\n#[async_trait]\npub trait SourceFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[async_trait]\npub trait SimpleFunctionExecutor: Send + Sync {\n /// Evaluate the operation.\n async fn evaluate(&self, args: Vec) -> Result;\n\n fn enable_cache(&self) -> bool {\n false\n }\n\n /// Must be Some if `enable_cache` is true.\n /// If it changes, the cache will be invalidated.\n fn behavior_version(&self) -> Option {\n None\n }\n}\n\n#[async_trait]\npub trait SimpleFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[derive(Debug)]\npub struct ExportTargetUpsertEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n pub value: FieldValues,\n}\n\n#[derive(Debug)]\npub struct ExportTargetDeleteEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Debug, Default)]\npub struct ExportTargetMutation {\n pub upserts: Vec,\n pub deletes: Vec,\n}\n\nimpl ExportTargetMutation {\n pub fn is_empty(&self) -> bool {\n self.upserts.is_empty() && self.deletes.is_empty()\n }\n}\n\n#[derive(Debug)]\npub struct ExportTargetMutationWithContext<'ctx, T: ?Sized + Send + Sync> {\n pub mutation: ExportTargetMutation,\n pub export_context: &'ctx T,\n}\n\npub struct ResourceSetupChangeItem<'a> {\n pub key: &'a serde_json::Value,\n pub setup_status: &'a dyn setup::ResourceSetupStatus,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum 
SetupStateCompatibility {\n /// The resource is fully compatible with the desired state.\n /// This means the resource can be updated to the desired state without any loss of data.\n Compatible,\n /// The resource is partially compatible with the desired state.\n /// This means data from some existing fields will be lost after applying the setup change.\n /// But at least their key fields of all rows are still preserved.\n PartialCompatible,\n /// The resource needs to be rebuilt. After applying the setup change, all data will be gone.\n NotCompatible,\n}\n\npub struct ExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n}\n\npub struct ExportDataCollectionSpec {\n pub name: String,\n pub spec: serde_json::Value,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\n#[async_trait]\npub trait ExportTargetFactory: Send + Sync {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )>;\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n context: Arc,\n ) -> Result>;\n\n /// Normalize the key. e.g. the JSON format may change (after code change, e.g. 
new optional field or field ordering), even if the underlying value is not changed.\n /// This should always return the canonical serialized form.\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result;\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result;\n\n fn extract_additional_key(\n &self,\n key: &KeyValue,\n value: &FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result;\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[derive(Clone)]\npub enum ExecutorFactory {\n Source(Arc),\n SimpleFunction(Arc),\n ExportTarget(Arc),\n}\n"], ["/cocoindex/src/py/mod.rs", "use crate::execution::evaluator::evaluate_transient_flow;\nuse crate::prelude::*;\n\nuse crate::base::schema::{FieldSchema, ValueType};\nuse crate::base::spec::{NamedSpec, OutputMode, ReactiveOpSpec, SpecFormatter};\nuse crate::lib_context::{clear_lib_context, get_auth_registry, init_lib_context};\nuse crate::ops::py_factory::{PyExportTargetFactory, PyOpArgSchema};\nuse crate::ops::{interface::ExecutorFactory, py_factory::PyFunctionFactory, register_factory};\nuse crate::server::{self, ServerSettings};\nuse crate::settings::Settings;\nuse crate::setup::{self};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::fmt::Write;\nuse std::sync::Arc;\n\nmod convert;\npub(crate) use convert::*;\n\npub struct PythonExecutionContext {\n pub event_loop: Py,\n}\n\nimpl PythonExecutionContext {\n pub fn new(_py: Python<'_>, event_loop: Py) -> Self {\n Self { event_loop }\n }\n}\n\npub trait ToResultWithPyTrace {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result;\n}\n\nimpl ToResultWithPyTrace for Result {\n fn 
to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => {\n let mut err_str = format!(\"Error calling Python function: {err}\");\n if let Some(tb) = err.traceback(py) {\n write!(&mut err_str, \"\\n{}\", tb.format()?)?;\n }\n Err(anyhow::anyhow!(err_str))\n }\n }\n }\n}\npub trait IntoPyResult {\n fn into_py_result(self) -> PyResult;\n}\n\nimpl IntoPyResult for Result {\n fn into_py_result(self) -> PyResult {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(PyException::new_err(format!(\"{err:?}\"))),\n }\n }\n}\n\n#[pyfunction]\nfn init(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n init_lib_context(settings.into_inner())?;\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn start_server(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n let server = get_runtime().block_on(server::init_server(\n get_lib_context()?,\n settings.into_inner(),\n ))?;\n get_runtime().spawn(server);\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn stop(py: Python<'_>) -> PyResult<()> {\n py.allow_threads(clear_lib_context);\n Ok(())\n}\n\n#[pyfunction]\nfn register_function_factory(name: String, py_function_factory: Py) -> PyResult<()> {\n let factory = PyFunctionFactory {\n py_function_factory,\n };\n register_factory(name, ExecutorFactory::SimpleFunction(Arc::new(factory))).into_py_result()\n}\n\n#[pyfunction]\nfn register_target_connector(name: String, py_target_connector: Py) -> PyResult<()> {\n let factory = PyExportTargetFactory {\n py_target_connector,\n };\n register_factory(name, ExecutorFactory::ExportTarget(Arc::new(factory))).into_py_result()\n}\n\n#[pyclass]\npub struct IndexUpdateInfo(pub execution::stats::IndexUpdateInfo);\n\n#[pymethods]\nimpl IndexUpdateInfo {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.0)\n }\n\n pub fn __repr__(&self) -> String {\n 
self.__str__()\n }\n}\n\n#[pyclass]\npub struct Flow(pub Arc);\n\n/// A single line in the rendered spec, with hierarchical children\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpecLine {\n /// The formatted content of the line (e.g., \"Import: name=documents, source=LocalFile\")\n pub content: String,\n /// Child lines in the hierarchy\n pub children: Vec,\n}\n\n/// A rendered specification, grouped by sections\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpec {\n /// List of (section_name, lines) pairs\n pub sections: Vec<(String, Vec)>,\n}\n\n#[pyclass]\npub struct FlowLiveUpdaterUpdates(execution::FlowLiveUpdaterUpdates);\n\n#[pymethods]\nimpl FlowLiveUpdaterUpdates {\n #[getter]\n pub fn active_sources(&self) -> Vec {\n self.0.active_sources.clone()\n }\n\n #[getter]\n pub fn updated_sources(&self) -> Vec {\n self.0.updated_sources.clone()\n }\n}\n\n#[pyclass]\npub struct FlowLiveUpdater(pub Arc);\n\n#[pymethods]\nimpl FlowLiveUpdater {\n #[staticmethod]\n pub fn create<'py>(\n py: Python<'py>,\n flow: &Flow,\n options: Pythonized,\n ) -> PyResult> {\n let flow = flow.0.clone();\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow,\n lib_context.require_builtin_db_pool().into_py_result()?,\n options.into_inner(),\n )\n .await\n .into_py_result()?;\n Ok(Self(Arc::new(live_updater)))\n })\n }\n\n pub fn wait_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(\n py,\n async move { live_updater.wait().await.into_py_result() },\n )\n }\n\n pub fn next_status_updates_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(py, async move {\n let updates = live_updater.next_status_updates().await.into_py_result()?;\n Ok(FlowLiveUpdaterUpdates(updates))\n })\n 
}\n\n pub fn abort(&self) {\n self.0.abort();\n }\n\n pub fn index_update_info(&self) -> IndexUpdateInfo {\n IndexUpdateInfo(self.0.index_update_info())\n }\n}\n\n#[pymethods]\nimpl Flow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.flow.flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn name(&self) -> &str {\n &self.0.flow.flow_instance.name\n }\n\n pub fn evaluate_and_dump(\n &self,\n py: Python<'_>,\n options: Pythonized,\n ) -> PyResult<()> {\n py.allow_threads(|| {\n get_runtime()\n .block_on(async {\n let exec_plan = self.0.flow.get_execution_plan().await?;\n let lib_context = get_lib_context()?;\n let execution_ctx = self.0.use_execution_ctx().await?;\n execution::dumper::evaluate_and_dump(\n &exec_plan,\n &execution_ctx.setup_execution_context,\n &self.0.flow.data_schema,\n options.into_inner(),\n lib_context.require_builtin_db_pool()?,\n )\n .await\n })\n .into_py_result()?;\n Ok(())\n })\n }\n\n #[pyo3(signature = (output_mode=None))]\n pub fn get_spec(&self, output_mode: Option>) -> PyResult {\n let mode = output_mode.map_or(OutputMode::Concise, |m| m.into_inner());\n let spec = &self.0.flow.flow_instance;\n let mut sections: IndexMap> = IndexMap::new();\n\n // Sources\n sections.insert(\n \"Source\".to_string(),\n spec.import_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Import: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Processing\n fn walk(op: &NamedSpec, mode: OutputMode) -> RenderedSpecLine {\n let content = format!(\"{}: {}\", op.name, op.spec.format(mode));\n\n let children = match &op.spec {\n ReactiveOpSpec::ForEach(fe) => fe\n .op_scope\n .ops\n .iter()\n .map(|nested| walk(nested, mode))\n .collect(),\n _ => vec![],\n };\n\n RenderedSpecLine { content, children }\n }\n\n sections.insert(\n \"Processing\".to_string(),\n spec.reactive_ops.iter().map(|op| walk(op, mode)).collect(),\n );\n\n // 
Targets\n sections.insert(\n \"Targets\".to_string(),\n spec.export_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Export: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Declarations\n sections.insert(\n \"Declarations\".to_string(),\n spec.declarations\n .iter()\n .map(|decl| RenderedSpecLine {\n content: format!(\"Declaration: {}\", decl.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n Ok(RenderedSpec {\n sections: sections.into_iter().collect(),\n })\n }\n\n pub fn get_schema(&self) -> Vec<(String, String, String)> {\n let schema = &self.0.flow.data_schema;\n let mut result = Vec::new();\n\n fn process_fields(\n fields: &[FieldSchema],\n prefix: &str,\n result: &mut Vec<(String, String, String)>,\n ) {\n for field in fields {\n let field_name = format!(\"{}{}\", prefix, field.name);\n\n let mut field_type = match &field.value_type.typ {\n ValueType::Basic(basic) => format!(\"{basic}\"),\n ValueType::Table(t) => format!(\"{}\", t.kind),\n ValueType::Struct(_) => \"Struct\".to_string(),\n };\n\n if field.value_type.nullable {\n field_type.push('?');\n }\n\n let attr_str = if field.value_type.attrs.is_empty() {\n String::new()\n } else {\n field\n .value_type\n .attrs\n .keys()\n .map(|k| k.to_string())\n .collect::>()\n .join(\", \")\n };\n\n result.push((field_name.clone(), field_type, attr_str));\n\n match &field.value_type.typ {\n ValueType::Struct(s) => {\n process_fields(&s.fields, &format!(\"{field_name}.\"), result);\n }\n ValueType::Table(t) => {\n process_fields(&t.row.fields, &format!(\"{field_name}[].\"), result);\n }\n ValueType::Basic(_) => {}\n }\n }\n }\n\n process_fields(&schema.schema.fields, \"\", &mut result);\n result\n }\n\n pub fn make_setup_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n\n 
pub fn make_drop_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n}\n\n#[pyclass]\npub struct TransientFlow(pub Arc);\n\n#[pymethods]\nimpl TransientFlow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.transient_flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn evaluate_async<'py>(\n &self,\n py: Python<'py>,\n args: Vec>,\n ) -> PyResult> {\n let flow = self.0.clone();\n let input_values: Vec = std::iter::zip(\n self.0.transient_flow_instance.input_fields.iter(),\n args.into_iter(),\n )\n .map(|(input_schema, arg)| value_from_py_object(&input_schema.value_type.typ, &arg))\n .collect::>()?;\n\n future_into_py(py, async move {\n let result = evaluate_transient_flow(&flow, &input_values)\n .await\n .into_py_result()?;\n Python::with_gil(|py| value_to_py_object(py, &result)?.into_py_any(py))\n })\n }\n}\n\n#[pyclass]\npub struct SetupChangeBundle(Arc);\n\n#[pymethods]\nimpl SetupChangeBundle {\n pub fn describe_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n future_into_py(py, async move {\n bundle.describe(&lib_context).await.into_py_result()\n })\n }\n\n pub fn apply_async<'py>(\n &self,\n py: Python<'py>,\n report_to_stdout: bool,\n ) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n\n future_into_py(py, async move {\n let mut stdout = None;\n let mut sink = None;\n bundle\n .apply(\n &lib_context,\n if report_to_stdout {\n stdout.insert(std::io::stdout())\n } else {\n sink.insert(std::io::sink())\n },\n )\n .await\n .into_py_result()\n })\n }\n}\n\n#[pyfunction]\nfn flow_names_with_setup_async(py: Python<'_>) -> PyResult> {\n future_into_py(py, async move {\n let lib_context = 
get_lib_context().into_py_result()?;\n let setup_ctx = lib_context\n .require_persistence_ctx()\n .into_py_result()?\n .setup_ctx\n .read()\n .await;\n let flow_names: Vec = setup_ctx.all_setup_states.flows.keys().cloned().collect();\n PyResult::Ok(flow_names)\n })\n}\n\n#[pyfunction]\nfn make_setup_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn make_drop_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn remove_flow_context(flow_name: String) {\n let lib_context_locked = crate::lib_context::LIB_CONTEXT.read().unwrap();\n if let Some(lib_context) = lib_context_locked.as_ref() {\n lib_context.remove_flow_context(&flow_name)\n }\n}\n\n#[pyfunction]\nfn add_auth_entry(key: String, value: Pythonized) -> PyResult<()> {\n get_auth_registry()\n .add(key, value.into_inner())\n .into_py_result()?;\n Ok(())\n}\n\n#[pyfunction]\nfn seder_roundtrip<'py>(\n py: Python<'py>,\n value: Bound<'py, PyAny>,\n typ: Pythonized,\n) -> PyResult> {\n let typ = typ.into_inner();\n let value = value_from_py_object(&typ, &value)?;\n let value = value::test_util::seder_roundtrip(&value, &typ).into_py_result()?;\n value_to_py_object(py, &value)\n}\n\n/// A Python module implemented in Rust.\n#[pymodule]\n#[pyo3(name = \"_engine\")]\nfn cocoindex_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {\n m.add_function(wrap_pyfunction!(init, m)?)?;\n m.add_function(wrap_pyfunction!(start_server, m)?)?;\n m.add_function(wrap_pyfunction!(stop, m)?)?;\n m.add_function(wrap_pyfunction!(register_function_factory, m)?)?;\n m.add_function(wrap_pyfunction!(register_target_connector, m)?)?;\n m.add_function(wrap_pyfunction!(flow_names_with_setup_async, m)?)?;\n 
m.add_function(wrap_pyfunction!(make_setup_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(make_drop_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(remove_flow_context, m)?)?;\n m.add_function(wrap_pyfunction!(add_auth_entry, m)?)?;\n\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n\n let testutil_module = PyModule::new(m.py(), \"testutil\")?;\n testutil_module.add_function(wrap_pyfunction!(seder_roundtrip, &testutil_module)?)?;\n m.add_submodule(&testutil_module)?;\n\n Ok(())\n}\n"], ["/cocoindex/src/base/json_schema.rs", "use crate::prelude::*;\n\nuse crate::utils::immutable::RefList;\nuse schemars::schema::{\n ArrayValidation, InstanceType, ObjectValidation, Schema, SchemaObject, SingleOrVec,\n SubschemaValidation,\n};\nuse std::fmt::Write;\n\npub struct ToJsonSchemaOptions {\n /// If true, mark all fields as required.\n /// Use union type (with `null`) for optional fields instead.\n /// Models like OpenAI will reject the schema if a field is not required.\n pub fields_always_required: bool,\n\n /// If true, the JSON schema supports the `format` keyword.\n pub supports_format: bool,\n\n /// If true, extract descriptions to a separate extra instruction.\n pub extract_descriptions: bool,\n\n /// If true, the top level must be a JSON object.\n pub top_level_must_be_object: bool,\n}\n\nstruct JsonSchemaBuilder {\n options: ToJsonSchemaOptions,\n extra_instructions_per_field: IndexMap,\n}\n\nimpl JsonSchemaBuilder {\n fn new(options: ToJsonSchemaOptions) -> Self {\n Self {\n options,\n extra_instructions_per_field: IndexMap::new(),\n }\n }\n\n fn set_description(\n &mut self,\n schema: &mut SchemaObject,\n description: impl ToString,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) {\n if self.options.extract_descriptions {\n let mut fields: Vec<_> = 
field_path.iter().map(|f| f.as_str()).collect();\n fields.reverse();\n self.extra_instructions_per_field\n .insert(fields.join(\".\"), description.to_string());\n } else {\n schema.metadata.get_or_insert_default().description = Some(description.to_string());\n }\n }\n\n fn for_basic_value_type(\n &mut self,\n basic_type: &schema::BasicValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n match basic_type {\n schema::BasicValueType::Str => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bytes => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bool => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Boolean)));\n }\n schema::BasicValueType::Int64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Integer)));\n }\n schema::BasicValueType::Float32 | schema::BasicValueType::Float64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Number)));\n }\n schema::BasicValueType::Range => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(\n InstanceType::Integer,\n ))),\n ..Default::default()\n }\n .into(),\n ))),\n min_items: Some(2),\n max_items: Some(2),\n ..Default::default()\n }));\n self.set_description(\n &mut schema,\n \"A range represented by a list of two positions, start pos (inclusive), end pos (exclusive).\",\n field_path,\n );\n }\n schema::BasicValueType::Uuid => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"uuid\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A UUID, e.g. 
123e4567-e89b-12d3-a456-426614174000\",\n field_path,\n );\n }\n schema::BasicValueType::Date => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A date in YYYY-MM-DD format, e.g. 2025-03-27\",\n field_path,\n );\n }\n schema::BasicValueType::Time => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A time in HH:MM:SS format, e.g. 13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::LocalDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time without timezone offset in YYYY-MM-DDTHH:MM:SS format, e.g. 2025-03-27T13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::OffsetDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time with timezone offset in RFC3339, e.g. 2025-03-27T13:32:12Z, 2025-03-27T07:32:12.313-06:00\",\n field_path,\n );\n }\n &schema::BasicValueType::TimeDelta => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"duration\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A duration, e.g. 'PT1H2M3S' (ISO 8601) or '1 day 2 hours 3 seconds'\",\n field_path,\n );\n }\n schema::BasicValueType::Json => {\n // Can be any value. 
No type constraint.\n }\n schema::BasicValueType::Vector(s) => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_basic_value_type(&s.element_type, field_path)\n .into(),\n ))),\n min_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n max_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n ..Default::default()\n }));\n }\n schema::BasicValueType::Union(s) => {\n schema.subschemas = Some(Box::new(SubschemaValidation {\n one_of: Some(\n s.types\n .iter()\n .map(|t| Schema::Object(self.for_basic_value_type(t, field_path)))\n .collect(),\n ),\n ..Default::default()\n }));\n }\n }\n schema\n }\n\n fn for_struct_schema(\n &mut self,\n struct_schema: &schema::StructSchema,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n if let Some(description) = &struct_schema.description {\n self.set_description(&mut schema, description, field_path);\n }\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Object)));\n schema.object = Some(Box::new(ObjectValidation {\n properties: struct_schema\n .fields\n .iter()\n .map(|f| {\n let mut schema =\n self.for_enriched_value_type(&f.value_type, field_path.prepend(&f.name));\n if self.options.fields_always_required && f.value_type.nullable {\n if let Some(instance_type) = &mut schema.instance_type {\n let mut types = match instance_type {\n SingleOrVec::Single(t) => vec![**t],\n SingleOrVec::Vec(t) => std::mem::take(t),\n };\n types.push(InstanceType::Null);\n *instance_type = SingleOrVec::Vec(types);\n }\n }\n (f.name.to_string(), schema.into())\n })\n .collect(),\n required: struct_schema\n .fields\n .iter()\n .filter(|&f| (self.options.fields_always_required || !f.value_type.nullable))\n .map(|f| f.name.to_string())\n .collect(),\n additional_properties: Some(Schema::Bool(false).into()),\n 
..Default::default()\n }));\n schema\n }\n\n fn for_value_type(\n &mut self,\n value_type: &schema::ValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n match value_type {\n schema::ValueType::Basic(b) => self.for_basic_value_type(b, field_path),\n schema::ValueType::Struct(s) => self.for_struct_schema(s, field_path),\n schema::ValueType::Table(c) => SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(InstanceType::Array))),\n array: Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_struct_schema(&c.row, field_path).into(),\n ))),\n ..Default::default()\n })),\n ..Default::default()\n },\n }\n }\n\n fn for_enriched_value_type(\n &mut self,\n enriched_value_type: &schema::EnrichedValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n self.for_value_type(&enriched_value_type.typ, field_path)\n }\n\n fn build_extra_instructions(&self) -> Result> {\n if self.extra_instructions_per_field.is_empty() {\n return Ok(None);\n }\n\n let mut instructions = String::new();\n write!(&mut instructions, \"Instructions for specific fields:\\n\\n\")?;\n for (field_path, instruction) in self.extra_instructions_per_field.iter() {\n write!(\n &mut instructions,\n \"- {}: {}\\n\\n\",\n if field_path.is_empty() {\n \"(root object)\"\n } else {\n field_path.as_str()\n },\n instruction\n )?;\n }\n Ok(Some(instructions))\n }\n}\n\npub struct ValueExtractor {\n value_type: schema::ValueType,\n object_wrapper_field_name: Option,\n}\n\nimpl ValueExtractor {\n pub fn extract_value(&self, json_value: serde_json::Value) -> Result {\n let unwrapped_json_value =\n if let Some(object_wrapper_field_name) = &self.object_wrapper_field_name {\n match json_value {\n serde_json::Value::Object(mut o) => o\n .remove(object_wrapper_field_name)\n .unwrap_or(serde_json::Value::Null),\n _ => {\n bail!(\"Field `{}` not found\", object_wrapper_field_name)\n }\n }\n } else {\n json_value\n };\n let result = 
value::Value::from_json(unwrapped_json_value, &self.value_type)?;\n Ok(result)\n }\n}\n\npub struct BuildJsonSchemaOutput {\n pub schema: SchemaObject,\n pub extra_instructions: Option,\n pub value_extractor: ValueExtractor,\n}\n\npub fn build_json_schema(\n value_type: schema::EnrichedValueType,\n options: ToJsonSchemaOptions,\n) -> Result {\n let mut builder = JsonSchemaBuilder::new(options);\n let (schema, object_wrapper_field_name) = if builder.options.top_level_must_be_object\n && !matches!(value_type.typ, schema::ValueType::Struct(_))\n {\n let object_wrapper_field_name = \"value\".to_string();\n let wrapper_struct = schema::StructSchema {\n fields: Arc::new(vec![schema::FieldSchema {\n name: object_wrapper_field_name.clone(),\n value_type: value_type.clone(),\n }]),\n description: None,\n };\n (\n builder.for_struct_schema(&wrapper_struct, RefList::Nil),\n Some(object_wrapper_field_name),\n )\n } else {\n (\n builder.for_enriched_value_type(&value_type, RefList::Nil),\n None,\n )\n };\n Ok(BuildJsonSchemaOutput {\n schema,\n extra_instructions: builder.build_extra_instructions()?,\n value_extractor: ValueExtractor {\n value_type: value_type.typ,\n object_wrapper_field_name,\n },\n })\n}\n"], ["/cocoindex/src/llm/anthropic.rs", "use crate::prelude::*;\nuse base64::prelude::*;\n\nuse crate::llm::{\n LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse anyhow::Context;\nuse urlencoding::encode;\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Anthropic doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"ANTHROPIC_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"ANTHROPIC_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl 
LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_content_parts: Vec = Vec::new();\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_content_parts.push(serde_json::json!({\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": mime_type,\n \"data\": base64_image,\n }\n }));\n }\n\n // Add text part\n user_content_parts.push(serde_json::json!({\n \"type\": \"text\",\n \"text\": request.user_prompt\n }));\n\n let messages = vec![serde_json::json!({\n \"role\": \"user\",\n \"content\": user_content_parts\n })];\n\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"messages\": messages,\n \"max_tokens\": 4096\n });\n\n // Add system prompt as top-level field if present (required)\n if let Some(system) = request.system_prompt {\n payload[\"system\"] = serde_json::json!(system);\n }\n\n // Extract schema from output_format, error if not JsonSchema\n let schema = match request.output_format.as_ref() {\n Some(OutputFormat::JsonSchema { schema, .. 
}) => schema,\n _ => api_bail!(\"Anthropic client expects OutputFormat::JsonSchema for all requests\"),\n };\n\n let schema_json = serde_json::to_value(schema)?;\n payload[\"tools\"] = serde_json::json!([\n { \"type\": \"custom\", \"name\": \"report_result\", \"input_schema\": schema_json }\n ]);\n\n let url = \"https://api.anthropic.com/v1/messages\";\n\n let encoded_api_key = encode(&self.api_key);\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"x-api-key\", encoded_api_key.as_ref())\n .header(\"anthropic-version\", \"2023-06-01\")\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Anthropic API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let mut resp_json: serde_json::Value = resp.json().await.context(\"Invalid JSON\")?;\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Anthropic API error: {:?}\", error);\n }\n\n // Debug print full response\n // println!(\"Anthropic API full response: {resp_json:?}\");\n\n let resp_content = &resp_json[\"content\"];\n let tool_name = \"report_result\";\n let mut extracted_json: Option = None;\n if let Some(array) = resp_content.as_array() {\n for item in array {\n if item.get(\"type\") == Some(&serde_json::Value::String(\"tool_use\".to_string()))\n && item.get(\"name\") == Some(&serde_json::Value::String(tool_name.to_string()))\n {\n if let Some(input) = item.get(\"input\") {\n extracted_json = Some(input.clone());\n break;\n }\n }\n }\n }\n let text = if let Some(json) = extracted_json {\n // Try strict JSON serialization first\n serde_json::to_string(&json)?\n } else {\n // Fallback: try text if no tool output found\n match &mut resp_json[\"content\"][0][\"text\"] {\n serde_json::Value::String(s) => {\n // Try strict JSON parsing first\n match serde_json::from_str::(s) {\n Ok(_) => std::mem::take(s),\n Err(e) => {\n // Try permissive json5 parsing as fallback\n match 
json5::from_str::(s) {\n Ok(value) => {\n println!(\"[Anthropic] Used permissive JSON5 parser for output\");\n serde_json::to_string(&value)?\n }\n Err(e2) => {\n return Err(anyhow::anyhow!(format!(\n \"No structured tool output or text found in response, and permissive JSON5 parsing also failed: {e}; {e2}\"\n )));\n }\n }\n }\n }\n }\n _ => {\n return Err(anyhow::anyhow!(\n \"No structured tool output or text found in response\"\n ));\n }\n }\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n"], ["/cocoindex/src/setup/states.rs", "/// Concepts:\n/// - Resource: some setup that needs to be tracked and maintained.\n/// - Setup State: current state of a resource.\n/// - Staging Change: states changes that may not be really applied yet.\n/// - Combined Setup State: Setup State + Staging Change.\n/// - Status Check: information about changes that are being applied / need to be applied.\n///\n/// Resource hierarchy:\n/// - [resource: setup metadata table] /// - Flow\n/// - [resource: metadata]\n/// - [resource: tracking table]\n/// - Target\n/// - [resource: target-specific stuff]\nuse crate::prelude::*;\n\nuse indenter::indented;\nuse owo_colors::{AnsiColors, OwoColorize};\nuse std::any::Any;\nuse std::fmt::Debug;\nuse std::fmt::{Display, Write};\nuse std::hash::Hash;\n\nuse super::db_metadata;\nuse crate::execution::db_tracking_setup::{\n self, TrackingTableSetupState, TrackingTableSetupStatus,\n};\n\nconst INDENT: &str = \" \";\n\npub trait StateMode: Clone + Copy {\n type State: Debug + Clone;\n type DefaultState: Debug + Clone + Default;\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct DesiredMode;\nimpl StateMode for DesiredMode {\n type State = T;\n type DefaultState = T;\n}\n\n#[derive(Debug, Clone)]\npub struct CombinedState {\n pub current: Option,\n 
pub staging: Vec>,\n /// Legacy state keys that no longer identical to the latest serialized form (usually caused by code change).\n /// They will be deleted when the next change is applied.\n pub legacy_state_key: Option,\n}\n\nimpl CombinedState {\n pub fn from_desired(desired: T) -> Self {\n Self {\n current: Some(desired),\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn from_change(prev: Option>, change: Option>) -> Self\n where\n T: Clone,\n {\n Self {\n current: match change {\n Some(Some(state)) => Some(state.clone()),\n Some(None) => None,\n None => prev.and_then(|v| v.current),\n },\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn possible_versions(&self) -> impl Iterator {\n self.current\n .iter()\n .chain(self.staging.iter().flat_map(|s| s.state().into_iter()))\n }\n\n pub fn always_exists(&self) -> bool {\n self.current.is_some() && self.staging.iter().all(|s| !s.is_delete())\n }\n\n pub fn legacy_values &V>(\n &self,\n desired: Option<&T>,\n f: F,\n ) -> BTreeSet<&V> {\n let desired_value = desired.map(&f);\n self.possible_versions()\n .map(f)\n .filter(|v| Some(*v) != desired_value)\n .collect()\n }\n}\n\nimpl Default for CombinedState {\n fn default() -> Self {\n Self {\n current: None,\n staging: vec![],\n legacy_state_key: None,\n }\n }\n}\n\nimpl PartialEq for CombinedState {\n fn eq(&self, other: &T) -> bool {\n self.staging.is_empty() && self.current.as_ref() == Some(other)\n }\n}\n\n#[derive(Clone, Copy)]\npub struct ExistingMode;\nimpl StateMode for ExistingMode {\n type State = CombinedState;\n type DefaultState = CombinedState;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub enum StateChange {\n Upsert(State),\n Delete,\n}\n\nimpl StateChange {\n pub fn is_delete(&self) -> bool {\n matches!(self, StateChange::Delete)\n }\n\n pub fn desired_state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n\n pub fn 
state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct SourceSetupState {\n pub source_id: i32,\n pub key_schema: schema::ValueType,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct ResourceIdentifier {\n pub key: serde_json::Value,\n pub target_kind: String,\n}\n\nimpl Display for ResourceIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}:{}\", self.target_kind, self.key)\n }\n}\n\n/// Common state (i.e. not specific to a target kind) for a target.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupStateCommon {\n pub target_id: i32,\n pub schema_version_id: i32,\n pub max_schema_version_id: i32,\n #[serde(default)]\n pub setup_by_user: bool,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupState {\n pub common: TargetSetupStateCommon,\n\n pub state: serde_json::Value,\n}\n\nimpl TargetSetupState {\n pub fn state_unless_setup_by_user(self) -> Option {\n (!self.common.setup_by_user).then_some(self.state)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]\npub struct FlowSetupMetadata {\n pub last_source_id: i32,\n pub last_target_id: i32,\n pub sources: BTreeMap,\n}\n\n#[derive(Debug, Clone)]\npub struct FlowSetupState {\n // The version number for the flow, last seen in the metadata table.\n pub seen_flow_metadata_version: Option,\n pub metadata: Mode::DefaultState,\n pub tracking_table: Mode::State,\n pub targets: IndexMap>,\n}\n\nimpl Default for FlowSetupState {\n fn default() -> Self {\n Self {\n seen_flow_metadata_version: None,\n metadata: Default::default(),\n tracking_table: Default::default(),\n targets: IndexMap::new(),\n }\n }\n}\n\nimpl PartialEq for FlowSetupState {\n fn eq(&self, other: &Self) -> bool {\n 
self.metadata == other.metadata\n && self.tracking_table == other.tracking_table\n && self.targets == other.targets\n }\n}\n\n#[derive(Debug, Clone)]\npub struct AllSetupStates {\n pub has_metadata_table: bool,\n pub flows: BTreeMap>,\n}\n\nimpl Default for AllSetupStates {\n fn default() -> Self {\n Self {\n has_metadata_table: false,\n flows: BTreeMap::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum SetupChangeType {\n NoChange,\n Create,\n Update,\n Delete,\n Invalid,\n}\n\npub enum ChangeDescription {\n Action(String),\n Note(String),\n}\n\npub trait ResourceSetupStatus: Send + Sync + Debug + Any + 'static {\n fn describe_changes(&self) -> Vec;\n\n fn change_type(&self) -> SetupChangeType;\n}\n\nimpl ResourceSetupStatus for Box {\n fn describe_changes(&self) -> Vec {\n self.as_ref().describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.as_ref().change_type()\n }\n}\n\nimpl ResourceSetupStatus for std::convert::Infallible {\n fn describe_changes(&self) -> Vec {\n unreachable!()\n }\n\n fn change_type(&self) -> SetupChangeType {\n unreachable!()\n }\n}\n\n#[derive(Debug)]\npub struct ResourceSetupInfo {\n pub key: K,\n pub state: Option,\n pub description: String,\n\n /// If `None`, the resource is managed by users.\n pub setup_status: Option,\n\n pub legacy_key: Option,\n}\n\nimpl std::fmt::Display for ResourceSetupInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let status_code = match self.setup_status.as_ref().map(|c| c.change_type()) {\n Some(SetupChangeType::NoChange) => \"READY\",\n Some(SetupChangeType::Create) => \"TO CREATE\",\n Some(SetupChangeType::Update) => \"TO UPDATE\",\n Some(SetupChangeType::Delete) => \"TO DELETE\",\n Some(SetupChangeType::Invalid) => \"INVALID\",\n None => \"USER MANAGED\",\n };\n let status_str = format!(\"[ {status_code:^9} ]\");\n let status_full = status_str.color(AnsiColors::Cyan);\n let desc_colored = 
&self.description;\n writeln!(f, \"{status_full} {desc_colored}\")?;\n if let Some(setup_status) = &self.setup_status {\n let changes = setup_status.describe_changes();\n if !changes.is_empty() {\n let mut f = indented(f).with_str(INDENT);\n writeln!(f, \"\")?;\n for change in changes {\n match change {\n ChangeDescription::Action(action) => {\n writeln!(\n f,\n \"{} {}\",\n \"TODO:\".color(AnsiColors::BrightBlack).bold(),\n action.color(AnsiColors::BrightBlack)\n )?;\n }\n ChangeDescription::Note(note) => {\n writeln!(\n f,\n \"{} {}\",\n \"NOTE:\".color(AnsiColors::Yellow).bold(),\n note.color(AnsiColors::Yellow)\n )?;\n }\n }\n }\n writeln!(f)?;\n }\n }\n Ok(())\n }\n}\n\nimpl ResourceSetupInfo {\n pub fn is_up_to_date(&self) -> bool {\n self.setup_status\n .as_ref()\n .is_none_or(|c| c.change_type() == SetupChangeType::NoChange)\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum ObjectStatus {\n Invalid,\n New,\n Existing,\n Deleted,\n}\n\npub trait ObjectSetupStatus {\n fn status(&self) -> Option;\n fn is_up_to_date(&self) -> bool;\n}\n\n#[derive(Debug)]\npub struct FlowSetupStatus {\n pub status: Option,\n pub seen_flow_metadata_version: Option,\n\n pub metadata_change: Option>,\n\n pub tracking_table:\n Option>,\n pub target_resources:\n Vec>>,\n\n pub unknown_resources: Vec,\n}\n\nimpl ObjectSetupStatus for FlowSetupStatus {\n fn status(&self) -> Option {\n self.status\n }\n\n fn is_up_to_date(&self) -> bool {\n self.metadata_change.is_none()\n && self\n .tracking_table\n .as_ref()\n .is_none_or(|t| t.is_up_to_date())\n && self\n .target_resources\n .iter()\n .all(|target| target.is_up_to_date())\n }\n}\n\n#[derive(Debug)]\npub struct GlobalSetupStatus {\n pub metadata_table: ResourceSetupInfo<(), (), db_metadata::MetadataTableSetup>,\n}\n\nimpl GlobalSetupStatus {\n pub fn from_setup_states(setup_states: &AllSetupStates) -> Self {\n Self {\n metadata_table: db_metadata::MetadataTableSetup {\n metadata_table_missing: 
!setup_states.has_metadata_table,\n }\n .into_setup_info(),\n }\n }\n\n pub fn is_up_to_date(&self) -> bool {\n self.metadata_table.is_up_to_date()\n }\n}\n\npub struct ObjectSetupStatusCode<'a, Status: ObjectSetupStatus>(&'a Status);\nimpl std::fmt::Display for ObjectSetupStatusCode<'_, Status> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let Some(status) = self.0.status() else {\n return Ok(());\n };\n write!(\n f,\n \"[ {:^9} ]\",\n match status {\n ObjectStatus::New => \"TO CREATE\",\n ObjectStatus::Existing =>\n if self.0.is_up_to_date() {\n \"READY\"\n } else {\n \"TO UPDATE\"\n },\n ObjectStatus::Deleted => \"TO DELETE\",\n ObjectStatus::Invalid => \"INVALID\",\n }\n )\n }\n}\n\nimpl std::fmt::Display for GlobalSetupStatus {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n writeln!(f, \"{}\", self.metadata_table)\n }\n}\n\npub struct FormattedFlowSetupStatus<'a>(pub &'a str, pub &'a FlowSetupStatus);\n\nimpl std::fmt::Display for FormattedFlowSetupStatus<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let flow_ssc = self.1;\n if flow_ssc.status.is_none() {\n return Ok(());\n }\n\n writeln!(\n f,\n \"{} Flow: {}\",\n ObjectSetupStatusCode(flow_ssc)\n .to_string()\n .color(AnsiColors::Cyan),\n self.0\n )?;\n\n let mut f = indented(f).with_str(INDENT);\n if let Some(tracking_table) = &flow_ssc.tracking_table {\n write!(f, \"{tracking_table}\")?;\n }\n for target_resource in &flow_ssc.target_resources {\n write!(f, \"{target_resource}\")?;\n }\n for resource in &flow_ssc.unknown_resources {\n writeln!(f, \"[ UNKNOWN ] {resource}\")?;\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/embed_text.rs", "use crate::{\n llm::{\n LlmApiConfig, LlmApiType, LlmEmbeddingClient, LlmEmbeddingRequest, new_llm_embedding_client,\n },\n ops::sdk::*,\n};\n\n#[derive(Deserialize)]\nstruct Spec {\n api_type: LlmApiType,\n model: String,\n address: Option,\n api_config: Option,\n 
output_dimension: Option,\n task_type: Option,\n}\n\nstruct Args {\n client: Box,\n text: ResolvedOpArg,\n}\n\nstruct Executor {\n spec: Spec,\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let req = LlmEmbeddingRequest {\n model: &self.spec.model,\n text: Cow::Borrowed(text),\n output_dimension: self.spec.output_dimension,\n task_type: self\n .spec\n .task_type\n .as_ref()\n .map(|s| Cow::Borrowed(s.as_str())),\n };\n let embedding = self.args.client.embed_text(req).await?;\n Ok(embedding.embedding.into())\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"EmbedText\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)> {\n let text = args_resolver.next_arg(\"text\")?;\n let client =\n new_llm_embedding_client(spec.api_type, spec.address.clone(), spec.api_config.clone())\n .await?;\n let output_dimension = match spec.output_dimension {\n Some(output_dimension) => output_dimension,\n None => {\n client.get_default_embedding_dimension(spec.model.as_str())\n .ok_or_else(|| api_error!(\"model \\\"{}\\\" is unknown for {:?}, needs to specify `output_dimension` explicitly\", spec.model, spec.api_type))?\n }\n };\n let output_schema = make_output_type(BasicValueType::Vector(VectorTypeSchema {\n dimension: Some(output_dimension as usize),\n element_type: Box::new(BasicValueType::Float32),\n }));\n Ok((Args { client, text }, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { 
spec, args }))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_embed_text() {\n let spec = Spec {\n api_type: LlmApiType::OpenAi,\n model: \"text-embedding-ada-002\".to_string(),\n address: None,\n api_config: None,\n output_dimension: None,\n task_type: None,\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"CocoIndex is a performant data transformation framework for AI.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_embed_text: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Vector(arc_vec)) => {\n assert_eq!(arc_vec.len(), 1536, \"Embedding vector dimension mismatch\");\n for item in arc_vec.iter() {\n match item {\n BasicValue::Float32(_) => {}\n _ => panic!(\"Embedding vector element is not Float32: {item:?}\"),\n }\n }\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Vector), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/py/convert.rs", "use crate::prelude::*;\n\nuse bytes::Bytes;\nuse numpy::{PyArray1, PyArrayDyn, PyArrayMethods};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::exceptions::PyTypeError;\nuse pyo3::types::PyAny;\nuse pyo3::types::{PyList, PyTuple};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pythonize::{depythonize, pythonize};\nuse serde::de::DeserializeOwned;\nuse std::ops::Deref;\n\nuse super::IntoPyResult;\n\n#[derive(Debug)]\npub struct Pythonized(pub T);\n\nimpl<'py, T: DeserializeOwned> FromPyObject<'py> for Pythonized {\n fn extract_bound(obj: &Bound<'py, PyAny>) -> PyResult {\n Ok(Pythonized(depythonize(obj).into_py_result()?))\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for &Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n pythonize(py, &self.0).into_py_result()\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n (&self).into_pyobject(py)\n }\n}\n\nimpl Pythonized {\n pub fn into_inner(self) -> T {\n self.0\n }\n}\n\nimpl Deref for Pythonized {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nfn basic_value_to_py_object<'py>(\n py: Python<'py>,\n v: &value::BasicValue,\n) -> PyResult> {\n let result = match v {\n value::BasicValue::Bytes(v) => v.into_bound_py_any(py)?,\n 
value::BasicValue::Str(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Bool(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Int64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float32(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Range(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Uuid(uuid_val) => uuid_val.into_bound_py_any(py)?,\n value::BasicValue::Date(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Time(v) => v.into_bound_py_any(py)?,\n value::BasicValue::LocalDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::OffsetDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::TimeDelta(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Json(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Vector(v) => handle_vector_to_py(py, v)?,\n value::BasicValue::UnionVariant { tag_id, value } => {\n (*tag_id, basic_value_to_py_object(py, value)?).into_bound_py_any(py)?\n }\n };\n Ok(result)\n}\n\npub fn field_values_to_py_object<'py, 'a>(\n py: Python<'py>,\n values: impl Iterator,\n) -> PyResult> {\n let fields = values\n .map(|v| value_to_py_object(py, v))\n .collect::>>()?;\n Ok(PyTuple::new(py, fields)?.into_any())\n}\n\npub fn value_to_py_object<'py>(py: Python<'py>, v: &value::Value) -> PyResult> {\n let result = match v {\n value::Value::Null => py.None().into_bound(py),\n value::Value::Basic(v) => basic_value_to_py_object(py, v)?,\n value::Value::Struct(v) => field_values_to_py_object(py, v.fields.iter())?,\n value::Value::UTable(v) | value::Value::LTable(v) => {\n let rows = v\n .iter()\n .map(|v| field_values_to_py_object(py, v.0.fields.iter()))\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n value::Value::KTable(v) => {\n let rows = v\n .iter()\n .map(|(k, v)| {\n field_values_to_py_object(\n py,\n std::iter::once(&value::Value::from(k.clone())).chain(v.0.fields.iter()),\n )\n })\n .collect::>>()?;\n PyList::new(py, 
rows)?.into_any()\n }\n };\n Ok(result)\n}\n\nfn basic_value_from_py_object<'py>(\n typ: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = match typ {\n schema::BasicValueType::Bytes => {\n value::BasicValue::Bytes(Bytes::from(v.extract::>()?))\n }\n schema::BasicValueType::Str => value::BasicValue::Str(Arc::from(v.extract::()?)),\n schema::BasicValueType::Bool => value::BasicValue::Bool(v.extract::()?),\n schema::BasicValueType::Int64 => value::BasicValue::Int64(v.extract::()?),\n schema::BasicValueType::Float32 => value::BasicValue::Float32(v.extract::()?),\n schema::BasicValueType::Float64 => value::BasicValue::Float64(v.extract::()?),\n schema::BasicValueType::Range => value::BasicValue::Range(depythonize(v)?),\n schema::BasicValueType::Uuid => value::BasicValue::Uuid(v.extract::()?),\n schema::BasicValueType::Date => value::BasicValue::Date(v.extract::()?),\n schema::BasicValueType::Time => value::BasicValue::Time(v.extract::()?),\n schema::BasicValueType::LocalDateTime => {\n value::BasicValue::LocalDateTime(v.extract::()?)\n }\n schema::BasicValueType::OffsetDateTime => {\n if v.getattr_opt(\"tzinfo\")?\n .ok_or_else(|| {\n PyErr::new::(format!(\n \"expecting a datetime.datetime value, got {}\",\n v.get_type()\n ))\n })?\n .is_none()\n {\n value::BasicValue::OffsetDateTime(\n v.extract::()?.and_utc().into(),\n )\n } else {\n value::BasicValue::OffsetDateTime(\n v.extract::>()?,\n )\n }\n }\n schema::BasicValueType::TimeDelta => {\n value::BasicValue::TimeDelta(v.extract::()?)\n }\n schema::BasicValueType::Json => {\n value::BasicValue::Json(Arc::from(depythonize::(v)?))\n }\n schema::BasicValueType::Vector(elem) => {\n if let Some(vector) = handle_ndarray_from_py(&elem.element_type, v)? 
{\n vector\n } else {\n // Fallback to list\n value::BasicValue::Vector(Arc::from(\n v.extract::>>()?\n .into_iter()\n .map(|v| basic_value_from_py_object(&elem.element_type, &v))\n .collect::>>()?,\n ))\n }\n }\n schema::BasicValueType::Union(s) => {\n let mut valid_value = None;\n\n // Try parsing the value\n for (i, typ) in s.types.iter().enumerate() {\n if let Ok(value) = basic_value_from_py_object(typ, v) {\n valid_value = Some(value::BasicValue::UnionVariant {\n tag_id: i,\n value: Box::new(value),\n });\n break;\n }\n }\n\n valid_value.ok_or_else(|| {\n PyErr::new::(format!(\n \"invalid union value: {}, available types: {:?}\",\n v, s.types\n ))\n })?\n }\n };\n Ok(result)\n}\n\n// Helper function to convert PyAny to BasicValue for NDArray\nfn handle_ndarray_from_py<'py>(\n elem_type: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult> {\n macro_rules! try_convert {\n ($t:ty, $cast:expr) => {\n if let Ok(array) = v.downcast::>() {\n let data = array.readonly().as_slice()?.to_vec();\n let vec = data.into_iter().map($cast).collect::>();\n return Ok(Some(value::BasicValue::Vector(Arc::from(vec))));\n }\n };\n }\n\n match *elem_type {\n schema::BasicValueType::Float32 => try_convert!(f32, value::BasicValue::Float32),\n schema::BasicValueType::Float64 => try_convert!(f64, value::BasicValue::Float64),\n schema::BasicValueType::Int64 => try_convert!(i64, value::BasicValue::Int64),\n _ => {}\n }\n\n Ok(None)\n}\n\n// Helper function to convert BasicValue::Vector to PyAny\nfn handle_vector_to_py<'py>(\n py: Python<'py>,\n v: &[value::BasicValue],\n) -> PyResult> {\n match v.first() {\n Some(value::BasicValue::Float32(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float32(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float32\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Float64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n 
value::BasicValue::Float64(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Int64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Int64(i) => Ok(*i),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Int64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n _ => Ok(v\n .iter()\n .map(|v| basic_value_to_py_object(py, v))\n .collect::>>()?\n .into_bound_py_any(py)?),\n }\n}\n\nfn field_values_from_py_object<'py>(\n schema: &schema::StructSchema,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let list = v.extract::>>()?;\n if list.len() != schema.fields.len() {\n return Err(PyException::new_err(format!(\n \"struct field number mismatch, expected {}, got {}\",\n schema.fields.len(),\n list.len()\n )));\n }\n\n Ok(value::FieldValues {\n fields: schema\n .fields\n .iter()\n .zip(list.into_iter())\n .map(|(f, v)| value_from_py_object(&f.value_type.typ, &v))\n .collect::>>()?,\n })\n}\n\npub fn value_from_py_object<'py>(\n typ: &schema::ValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = if v.is_none() {\n value::Value::Null\n } else {\n match typ {\n schema::ValueType::Basic(typ) => {\n value::Value::Basic(basic_value_from_py_object(typ, v)?)\n }\n schema::ValueType::Struct(schema) => {\n value::Value::Struct(field_values_from_py_object(schema, v)?)\n }\n schema::ValueType::Table(schema) => {\n let list = v.extract::>>()?;\n let values = list\n .into_iter()\n .map(|v| field_values_from_py_object(&schema.row, &v))\n .collect::>>()?;\n\n match schema.kind {\n schema::TableKind::UTable => {\n value::Value::UTable(values.into_iter().map(|v| v.into()).collect())\n }\n schema::TableKind::LTable => {\n value::Value::LTable(values.into_iter().map(|v| v.into()).collect())\n }\n\n schema::TableKind::KTable => value::Value::KTable(\n values\n .into_iter()\n 
.map(|v| {\n let mut iter = v.fields.into_iter();\n let key = iter.next().unwrap().into_key().into_py_result()?;\n Ok((\n key,\n value::ScopeValue(value::FieldValues {\n fields: iter.collect::>(),\n }),\n ))\n })\n .collect::>>()?,\n ),\n }\n }\n }\n };\n Ok(result)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::base::schema;\n use crate::base::value;\n use crate::base::value::ScopeValue;\n use pyo3::Python;\n use std::collections::BTreeMap;\n use std::sync::Arc;\n\n fn assert_roundtrip_conversion(original_value: &value::Value, value_type: &schema::ValueType) {\n Python::with_gil(|py| {\n // Convert Rust value to Python object using value_to_py_object\n let py_object = value_to_py_object(py, original_value)\n .expect(\"Failed to convert Rust value to Python object\");\n\n println!(\"Python object: {py_object:?}\");\n let roundtripped_value = value_from_py_object(value_type, &py_object)\n .expect(\"Failed to convert Python object back to Rust value\");\n\n println!(\"Roundtripped value: {roundtripped_value:?}\");\n assert_eq!(\n original_value, &roundtripped_value,\n \"Value mismatch after roundtrip\"\n );\n });\n }\n\n #[test]\n fn test_roundtrip_basic_values() {\n let values_and_types = vec![\n (\n value::Value::Basic(value::BasicValue::Int64(42)),\n schema::ValueType::Basic(schema::BasicValueType::Int64),\n ),\n (\n value::Value::Basic(value::BasicValue::Float64(3.14)),\n schema::ValueType::Basic(schema::BasicValueType::Float64),\n ),\n (\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"hello\"))),\n schema::ValueType::Basic(schema::BasicValueType::Str),\n ),\n (\n value::Value::Basic(value::BasicValue::Bool(true)),\n schema::ValueType::Basic(schema::BasicValueType::Bool),\n ),\n ];\n\n for (val, typ) in values_and_types {\n assert_roundtrip_conversion(&val, &typ);\n }\n }\n\n #[test]\n fn test_roundtrip_struct() {\n let struct_schema = schema::StructSchema {\n description: Some(Arc::from(\"Test struct description\")),\n fields: 
Arc::new(vec![\n schema::FieldSchema {\n name: \"a\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"b\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n };\n\n let struct_val_data = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(10)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"world\"))),\n ],\n };\n\n let struct_val = value::Value::Struct(struct_val_data);\n let struct_typ = schema::ValueType::Struct(struct_schema); // No clone needed\n\n assert_roundtrip_conversion(&struct_val, &struct_typ);\n }\n\n #[test]\n fn test_roundtrip_table_types() {\n let row_schema_struct = Arc::new(schema::StructSchema {\n description: Some(Arc::from(\"Test table row description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"key_col\".to_string(), // Will be used as key for KTable implicitly\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_1\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_2\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Bool),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n });\n\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(1)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n 
value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(2)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // UTable\n let utable_schema = schema::TableSchema {\n kind: schema::TableKind::UTable,\n row: (*row_schema_struct).clone(),\n };\n let utable_val = value::Value::UTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let utable_typ = schema::ValueType::Table(utable_schema);\n assert_roundtrip_conversion(&utable_val, &utable_typ);\n\n // LTable\n let ltable_schema = schema::TableSchema {\n kind: schema::TableKind::LTable,\n row: (*row_schema_struct).clone(),\n };\n let ltable_val = value::Value::LTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let ltable_typ = schema::ValueType::Table(ltable_schema);\n assert_roundtrip_conversion(<able_val, <able_typ);\n\n // KTable\n let ktable_schema = schema::TableSchema {\n kind: schema::TableKind::KTable,\n row: (*row_schema_struct).clone(),\n };\n let mut ktable_data = BTreeMap::new();\n\n // Create KTable entries where the ScopeValue doesn't include the key field\n // This matches how the Python code will serialize/deserialize\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // For KTable, the key is 
extracted from the first field of ScopeValue based on current serialization\n let key1 = value::Value::::Basic(value::BasicValue::Int64(1))\n .into_key()\n .unwrap();\n let key2 = value::Value::::Basic(value::BasicValue::Int64(2))\n .into_key()\n .unwrap();\n\n ktable_data.insert(key1, row1_scope_val.clone());\n ktable_data.insert(key2, row2_scope_val.clone());\n\n let ktable_val = value::Value::KTable(ktable_data);\n let ktable_typ = schema::ValueType::Table(ktable_schema);\n assert_roundtrip_conversion(&ktable_val, &ktable_typ);\n }\n}\n"], ["/cocoindex/src/execution/memoization.rs", "use anyhow::{Result, bail};\nuse serde::{Deserialize, Serialize};\nuse std::{\n borrow::Cow,\n collections::HashMap,\n future::Future,\n sync::{Arc, Mutex},\n};\n\nuse crate::{\n base::{schema, value},\n service::error::{SharedError, SharedResultExtRef},\n utils::fingerprint::{Fingerprint, Fingerprinter},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StoredCacheEntry {\n time_sec: i64,\n value: serde_json::Value,\n}\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct StoredMemoizationInfo {\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub cache: HashMap,\n\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub uuids: HashMap>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub content_hash: Option,\n}\n\npub type CacheEntryCell = Arc>>;\nenum CacheData {\n /// Existing entry in previous runs, but not in current run yet.\n Previous(serde_json::Value),\n /// Value appeared in current run.\n Current(CacheEntryCell),\n}\n\nstruct CacheEntry {\n time: chrono::DateTime,\n data: CacheData,\n}\n\n#[derive(Default)]\nstruct UuidEntry {\n uuids: Vec,\n num_current: usize,\n}\n\nimpl UuidEntry {\n fn new(uuids: Vec) -> Self {\n Self {\n uuids,\n num_current: 0,\n }\n }\n\n fn into_stored(self) -> Option> {\n if self.num_current == 0 {\n return None;\n }\n let mut uuids = self.uuids;\n if 
self.num_current < uuids.len() {\n uuids.truncate(self.num_current);\n }\n Some(uuids)\n }\n}\n\npub struct EvaluationMemoryOptions {\n pub enable_cache: bool,\n\n /// If true, it's for evaluation only.\n /// In this mode, we don't memoize anything.\n pub evaluation_only: bool,\n}\n\npub struct EvaluationMemory {\n current_time: chrono::DateTime,\n cache: Option>>,\n uuids: Mutex>,\n evaluation_only: bool,\n}\n\nimpl EvaluationMemory {\n pub fn new(\n current_time: chrono::DateTime,\n stored_info: Option,\n options: EvaluationMemoryOptions,\n ) -> Self {\n let (stored_cache, stored_uuids) = stored_info\n .map(|stored_info| (stored_info.cache, stored_info.uuids))\n .unzip();\n Self {\n current_time,\n cache: options.enable_cache.then(|| {\n Mutex::new(\n stored_cache\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, e)| {\n (\n k,\n CacheEntry {\n time: chrono::DateTime::from_timestamp(e.time_sec, 0)\n .unwrap_or(chrono::DateTime::::MIN_UTC),\n data: CacheData::Previous(e.value),\n },\n )\n })\n .collect(),\n )\n }),\n uuids: Mutex::new(\n (!options.evaluation_only)\n .then_some(stored_uuids)\n .flatten()\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, v)| (k, UuidEntry::new(v)))\n .collect(),\n ),\n evaluation_only: options.evaluation_only,\n }\n }\n\n pub fn into_stored(self) -> Result {\n if self.evaluation_only {\n bail!(\"For evaluation only, cannot convert to stored MemoizationInfo\");\n }\n let cache = if let Some(cache) = self.cache {\n cache\n .into_inner()?\n .into_iter()\n .filter_map(|(k, e)| match e.data {\n CacheData::Previous(_) => None,\n CacheData::Current(entry) => match entry.get() {\n Some(Ok(v)) => Some(serde_json::to_value(v).map(|value| {\n (\n k,\n StoredCacheEntry {\n time_sec: e.time.timestamp(),\n value,\n },\n )\n })),\n _ => None,\n },\n })\n .collect::>()?\n } else {\n bail!(\"Cache is disabled, cannot convert to stored MemoizationInfo\");\n };\n let uuids = self\n .uuids\n .into_inner()?\n .into_iter()\n 
.filter_map(|(k, v)| v.into_stored().map(|uuids| (k, uuids)))\n .collect();\n Ok(StoredMemoizationInfo {\n cache,\n uuids,\n content_hash: None,\n })\n }\n\n pub fn get_cache_entry(\n &self,\n key: impl FnOnce() -> Result,\n typ: &schema::ValueType,\n ttl: Option,\n ) -> Result> {\n let mut cache = if let Some(cache) = &self.cache {\n cache.lock().unwrap()\n } else {\n return Ok(None);\n };\n let result = match cache.entry(key()?) {\n std::collections::hash_map::Entry::Occupied(mut entry)\n if !ttl\n .map(|ttl| entry.get().time + ttl < self.current_time)\n .unwrap_or(false) =>\n {\n let entry_mut = &mut entry.get_mut();\n match &mut entry_mut.data {\n CacheData::Previous(value) => {\n let value = value::Value::from_json(std::mem::take(value), typ)?;\n let cell = Arc::new(tokio::sync::OnceCell::from(Ok(value)));\n let time = entry_mut.time;\n entry.insert(CacheEntry {\n time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n CacheData::Current(cell) => cell.clone(),\n }\n }\n entry => {\n let cell = Arc::new(tokio::sync::OnceCell::new());\n entry.insert_entry(CacheEntry {\n time: self.current_time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n };\n Ok(Some(result))\n }\n\n pub fn next_uuid(&self, key: Fingerprint) -> Result {\n let mut uuids = self.uuids.lock().unwrap();\n\n let entry = uuids.entry(key).or_default();\n let uuid = if self.evaluation_only {\n let fp = Fingerprinter::default()\n .with(&key)?\n .with(&entry.num_current)?\n .into_fingerprint();\n uuid::Uuid::new_v8(fp.0)\n } else if entry.num_current < entry.uuids.len() {\n entry.uuids[entry.num_current]\n } else {\n let uuid = uuid::Uuid::new_v4();\n entry.uuids.push(uuid);\n uuid\n };\n entry.num_current += 1;\n Ok(uuid)\n }\n}\n\npub async fn evaluate_with_cell(\n cell: Option<&CacheEntryCell>,\n compute: impl FnOnce() -> Fut,\n) -> Result>\nwhere\n Fut: Future>,\n{\n let result = match cell {\n Some(cell) => Cow::Borrowed(\n cell.get_or_init(|| {\n let fut = compute();\n 
async move { fut.await.map_err(SharedError::new) }\n })\n .await\n .std_result()?,\n ),\n None => Cow::Owned(compute().await?),\n };\n Ok(result)\n}\n"], ["/cocoindex/src/base/spec.rs", "use crate::prelude::*;\n\nuse super::schema::{EnrichedValueType, FieldSchema};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\nuse std::ops::Deref;\n\n/// OutputMode enum for displaying spec info in different granularity\n#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum OutputMode {\n Concise,\n Verbose,\n}\n\n/// Formatting spec per output mode\npub trait SpecFormatter {\n fn format(&self, mode: OutputMode) -> String;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum SpecString {\n /// The value comes from the environment variable.\n Env(String),\n /// The value is defined by the literal string.\n #[serde(untagged)]\n Literal(String),\n}\n\npub type ScopeName = String;\n\n/// Used to identify a data field within a flow.\n/// Within a flow, in each specific scope, each field name must be unique.\n/// - A field is defined by `outputs` of an operation. There must be exactly one definition for each field.\n/// - A field can be used as an input for multiple operations.\npub type FieldName = String;\n\npub const ROOT_SCOPE_NAME: &str = \"_root\";\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]\npub struct FieldPath(pub Vec);\n\nimpl Deref for FieldPath {\n type Target = Vec;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl fmt::Display for FieldPath {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.is_empty() {\n write!(f, \"*\")\n } else {\n write!(f, \"{}\", self.join(\".\"))\n }\n }\n}\n\n/// Used to identify an input or output argument for an operator.\n/// Useful to identify different inputs/outputs of the same operation. 
Usually omitted for operations with the same purpose of input/output.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct OpArgName(pub Option);\n\nimpl fmt::Display for OpArgName {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if let Some(arg_name) = &self.0 {\n write!(f, \"${arg_name}\")\n } else {\n write!(f, \"?\")\n }\n }\n}\n\nimpl OpArgName {\n pub fn is_unnamed(&self) -> bool {\n self.0.is_none()\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct NamedSpec {\n pub name: String,\n\n #[serde(flatten)]\n pub spec: T,\n}\n\nimpl fmt::Display for NamedSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}: {}\", self.name, self.spec)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FieldMapping {\n /// If unspecified, means the current scope.\n /// \"_root\" refers to the top-level scope.\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub scope: Option,\n\n pub field_path: FieldPath,\n}\n\nimpl fmt::Display for FieldMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let scope = self.scope.as_deref().unwrap_or(\"\");\n write!(\n f,\n \"{}{}\",\n if scope.is_empty() {\n \"\".to_string()\n } else {\n format!(\"{scope}.\")\n },\n self.field_path\n )\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ConstantMapping {\n pub schema: EnrichedValueType,\n pub value: serde_json::Value,\n}\n\nimpl fmt::Display for ConstantMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let value = serde_json::to_string(&self.value).unwrap_or(\"#serde_error\".to_string());\n write!(f, \"{value}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectionMapping {\n pub field: FieldMapping,\n pub scope_name: ScopeName,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StructMapping {\n pub fields: Vec>,\n}\n\nimpl fmt::Display for 
StructMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let fields = self\n .fields\n .iter()\n .map(|field| field.name.clone())\n .collect::>()\n .join(\",\");\n write!(f, \"{fields}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum ValueMapping {\n Constant(ConstantMapping),\n Field(FieldMapping),\n Struct(StructMapping),\n // TODO: Add support for collections\n}\n\nimpl ValueMapping {\n pub fn is_entire_scope(&self) -> bool {\n match self {\n ValueMapping::Field(FieldMapping {\n scope: None,\n field_path,\n }) => field_path.is_empty(),\n _ => false,\n }\n }\n}\n\nimpl std::fmt::Display for ValueMapping {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n match self {\n ValueMapping::Constant(v) => write!(\n f,\n \"{}\",\n serde_json::to_string(&v.value)\n .unwrap_or_else(|_| \"#(invalid json value)\".to_string())\n ),\n ValueMapping::Field(v) => {\n write!(f, \"{}.{}\", v.scope.as_deref().unwrap_or(\"\"), v.field_path)\n }\n ValueMapping::Struct(v) => write!(\n f,\n \"Struct({})\",\n v.fields\n .iter()\n .map(|f| format!(\"{}={}\", f.name, f.spec))\n .collect::>()\n .join(\", \")\n ),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpArgBinding {\n #[serde(default, skip_serializing_if = \"OpArgName::is_unnamed\")]\n pub arg_name: OpArgName,\n\n #[serde(flatten)]\n pub value: ValueMapping,\n}\n\nimpl fmt::Display for OpArgBinding {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.arg_name.is_unnamed() {\n write!(f, \"{}\", self.value)\n } else {\n write!(f, \"{}={}\", self.arg_name, self.value)\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpSpec {\n pub kind: String,\n #[serde(flatten, default)]\n pub spec: serde_json::Map,\n}\n\nimpl SpecFormatter for OpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.kind.clone(),\n OutputMode::Verbose => {\n let 
spec_str = serde_json::to_string_pretty(&self.spec)\n .map(|s| {\n let lines: Vec<&str> = s.lines().collect();\n if lines.len() < s.lines().count() {\n lines\n .into_iter()\n .chain([\"...\"])\n .collect::>()\n .join(\"\\n \")\n } else {\n lines.join(\"\\n \")\n }\n })\n .unwrap_or(\"#serde_error\".to_string());\n format!(\"{}({})\", self.kind, spec_str)\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ExecutionOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_rows: Option,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_bytes: Option,\n}\n\nimpl ExecutionOptions {\n pub fn get_concur_control_options(&self) -> concur_control::Options {\n concur_control::Options {\n max_inflight_rows: self.max_inflight_rows,\n max_inflight_bytes: self.max_inflight_bytes,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct SourceRefreshOptions {\n pub refresh_interval: Option,\n}\n\nimpl fmt::Display for SourceRefreshOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let refresh = self\n .refresh_interval\n .map(|d| format!(\"{d:?}\"))\n .unwrap_or(\"none\".to_string());\n write!(f, \"{refresh}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ImportOpSpec {\n pub source: OpSpec,\n\n #[serde(default)]\n pub refresh_options: SourceRefreshOptions,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl SpecFormatter for ImportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let source = self.source.format(mode);\n format!(\"source={}, refresh={}\", source, self.refresh_options)\n }\n}\n\nimpl fmt::Display for ImportOpSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.format(OutputMode::Concise))\n }\n}\n\n/// Transform data using a given operator.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransformOpSpec 
{\n pub inputs: Vec,\n pub op: OpSpec,\n}\n\nimpl SpecFormatter for TransformOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let inputs = self\n .inputs\n .iter()\n .map(ToString::to_string)\n .collect::>()\n .join(\",\");\n let op_str = self.op.format(mode);\n match mode {\n OutputMode::Concise => format!(\"op={op_str}, inputs={inputs}\"),\n OutputMode::Verbose => format!(\"op={op_str}, inputs=[{inputs}]\"),\n }\n }\n}\n\n/// Apply reactive operations to each row of the input field.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ForEachOpSpec {\n /// Mapping that provides a table to apply reactive operations to.\n pub field_path: FieldPath,\n pub op_scope: ReactiveOpScope,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl ForEachOpSpec {\n pub fn get_label(&self) -> String {\n format!(\"Loop over {}\", self.field_path)\n }\n}\n\nimpl SpecFormatter for ForEachOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.get_label(),\n OutputMode::Verbose => format!(\"field={}\", self.field_path),\n }\n }\n}\n\n/// Emit data to a given collector at the given scope.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectOpSpec {\n /// Field values to be collected.\n pub input: StructMapping,\n /// Scope for the collector.\n pub scope_name: ScopeName,\n /// Name of the collector.\n pub collector_name: FieldName,\n /// If specified, the collector will have an automatically generated UUID field with the given name.\n /// The uuid will remain stable when collected input values remain unchanged.\n pub auto_uuid_field: Option,\n}\n\nimpl SpecFormatter for CollectOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let uuid = self.auto_uuid_field.as_deref().unwrap_or(\"none\");\n match mode {\n OutputMode::Concise => {\n format!(\n \"collector={}, input={}, uuid={}\",\n self.collector_name, self.input, uuid\n )\n }\n OutputMode::Verbose => {\n format!(\n 
\"scope={}, collector={}, input=[{}], uuid={}\",\n self.scope_name, self.collector_name, self.input, uuid\n )\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum VectorSimilarityMetric {\n CosineSimilarity,\n L2Distance,\n InnerProduct,\n}\n\nimpl fmt::Display for VectorSimilarityMetric {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n match self {\n VectorSimilarityMetric::CosineSimilarity => write!(f, \"Cosine\"),\n VectorSimilarityMetric::L2Distance => write!(f, \"L2\"),\n VectorSimilarityMetric::InnerProduct => write!(f, \"InnerProduct\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct VectorIndexDef {\n pub field_name: FieldName,\n pub metric: VectorSimilarityMetric,\n}\n\nimpl fmt::Display for VectorIndexDef {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}:{}\", self.field_name, self.metric)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct IndexOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub primary_key_fields: Option>,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n pub vector_indexes: Vec,\n}\n\nimpl IndexOptions {\n pub fn primary_key_fields(&self) -> Result<&[FieldName]> {\n Ok(self\n .primary_key_fields\n .as_ref()\n .ok_or(api_error!(\"Primary key fields are not set\"))?\n .as_ref())\n }\n}\n\nimpl fmt::Display for IndexOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let primary_keys = self\n .primary_key_fields\n .as_ref()\n .map(|p| p.join(\",\"))\n .unwrap_or_default();\n let vector_indexes = self\n .vector_indexes\n .iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\",\");\n write!(f, \"keys={primary_keys}, indexes={vector_indexes}\")\n }\n}\n\n/// Store data to a given sink.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ExportOpSpec {\n pub collector_name: FieldName,\n pub target: OpSpec,\n pub 
index_options: IndexOptions,\n pub setup_by_user: bool,\n}\n\nimpl SpecFormatter for ExportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let target_str = self.target.format(mode);\n let base = format!(\n \"collector={}, target={}, {}\",\n self.collector_name, target_str, self.index_options\n );\n match mode {\n OutputMode::Concise => base,\n OutputMode::Verbose => format!(\"{}, setup_by_user={}\", base, self.setup_by_user),\n }\n }\n}\n\n/// A reactive operation reacts on given input values.\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"action\")]\npub enum ReactiveOpSpec {\n Transform(TransformOpSpec),\n ForEach(ForEachOpSpec),\n Collect(CollectOpSpec),\n}\n\nimpl SpecFormatter for ReactiveOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match self {\n ReactiveOpSpec::Transform(t) => format!(\"Transform: {}\", t.format(mode)),\n ReactiveOpSpec::ForEach(fe) => match mode {\n OutputMode::Concise => fe.get_label().to_string(),\n OutputMode::Verbose => format!(\"ForEach: {}\", fe.format(mode)),\n },\n ReactiveOpSpec::Collect(c) => format!(\"Collect: {}\", c.format(mode)),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReactiveOpScope {\n pub name: ScopeName,\n pub ops: Vec>,\n // TODO: Suport collectors\n}\n\nimpl fmt::Display for ReactiveOpScope {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"Scope: name={}\", self.name)\n }\n}\n\n/// A flow defines the rule to sync data from given sources to given sinks with given transformations.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowInstanceSpec {\n /// Name of the flow instance.\n pub name: String,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub import_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub reactive_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub export_ops: Vec>,\n\n 
#[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub declarations: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransientFlowSpec {\n pub name: String,\n pub input_fields: Vec,\n pub reactive_ops: Vec>,\n pub output_value: ValueMapping,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SimpleSemanticsQueryHandlerSpec {\n pub name: String,\n pub flow_instance_name: String,\n pub export_target_name: String,\n pub query_transform_flow: TransientFlowSpec,\n pub default_similarity_metric: VectorSimilarityMetric,\n}\n\npub struct AuthEntryReference {\n pub key: String,\n _phantom: std::marker::PhantomData,\n}\n\nimpl fmt::Debug for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl fmt::Display for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl Clone for AuthEntryReference {\n fn clone(&self) -> Self {\n Self {\n key: self.key.clone(),\n _phantom: std::marker::PhantomData,\n }\n }\n}\n\n#[derive(Serialize, Deserialize)]\nstruct UntypedAuthEntryReference {\n key: T,\n}\n\nimpl Serialize for AuthEntryReference {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n UntypedAuthEntryReference { key: &self.key }.serialize(serializer)\n }\n}\n\nimpl<'de, T> Deserialize<'de> for AuthEntryReference {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let untyped_ref = UntypedAuthEntryReference::::deserialize(deserializer)?;\n Ok(AuthEntryReference {\n key: untyped_ref.key,\n _phantom: std::marker::PhantomData,\n })\n }\n}\n\nimpl PartialEq for AuthEntryReference {\n fn eq(&self, other: &Self) -> bool {\n self.key == other.key\n }\n}\n\nimpl Eq for AuthEntryReference {}\n\nimpl std::hash::Hash for AuthEntryReference {\n fn hash(&self, state: &mut H) 
{\n self.key.hash(state);\n }\n}\n"], ["/cocoindex/src/lib_context.rs", "use crate::prelude::*;\n\nuse crate::builder::AnalyzedFlow;\nuse crate::execution::source_indexer::SourceIndexingContext;\nuse crate::service::error::ApiError;\nuse crate::settings;\nuse crate::setup::ObjectSetupStatus;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\nuse sqlx::postgres::PgConnectOptions;\nuse tokio::runtime::Runtime;\n\npub struct FlowExecutionContext {\n pub setup_execution_context: Arc,\n pub setup_status: setup::FlowSetupStatus,\n source_indexing_contexts: Vec>>,\n}\n\nasync fn build_setup_context(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result<(\n Arc,\n setup::FlowSetupStatus,\n)> {\n let setup_execution_context = Arc::new(exec_ctx::build_flow_setup_execution_context(\n &analyzed_flow.flow_instance,\n &analyzed_flow.data_schema,\n &analyzed_flow.setup_state,\n existing_flow_ss,\n )?);\n\n let setup_status = setup::check_flow_setup_status(\n Some(&setup_execution_context.setup_state),\n existing_flow_ss,\n &analyzed_flow.flow_instance_ctx,\n )\n .await?;\n\n Ok((setup_execution_context, setup_status))\n}\n\nimpl FlowExecutionContext {\n async fn new(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n let mut source_indexing_contexts = Vec::new();\n source_indexing_contexts.resize_with(analyzed_flow.flow_instance.import_ops.len(), || {\n tokio::sync::OnceCell::new()\n });\n\n Ok(Self {\n setup_execution_context,\n setup_status,\n source_indexing_contexts,\n })\n }\n\n pub async fn update_setup_state(\n &mut self,\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result<()> {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n self.setup_execution_context = 
setup_execution_context;\n self.setup_status = setup_status;\n Ok(())\n }\n\n pub async fn get_source_indexing_context(\n &self,\n flow: &Arc,\n source_idx: usize,\n pool: &PgPool,\n ) -> Result<&Arc> {\n self.source_indexing_contexts[source_idx]\n .get_or_try_init(|| async move {\n anyhow::Ok(Arc::new(\n SourceIndexingContext::load(\n flow.clone(),\n source_idx,\n self.setup_execution_context.clone(),\n pool,\n )\n .await?,\n ))\n })\n .await\n }\n}\n\npub struct FlowContext {\n pub flow: Arc,\n execution_ctx: Arc>,\n}\n\nimpl FlowContext {\n pub fn flow_name(&self) -> &str {\n &self.flow.flow_instance.name\n }\n\n pub async fn new(\n flow: Arc,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let execution_ctx = Arc::new(tokio::sync::RwLock::new(\n FlowExecutionContext::new(&flow, existing_flow_ss).await?,\n ));\n Ok(Self {\n flow,\n execution_ctx,\n })\n }\n\n pub async fn use_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.read().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub async fn use_owned_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.clone().read_owned().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. 
Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub fn get_execution_ctx_for_setup(&self) -> &tokio::sync::RwLock {\n &self.execution_ctx\n }\n}\n\nstatic TOKIO_RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap());\nstatic AUTH_REGISTRY: LazyLock> = LazyLock::new(|| Arc::new(AuthRegistry::new()));\n\ntype PoolKey = (String, Option);\ntype PoolValue = Arc>;\n\n#[derive(Default)]\npub struct DbPools {\n pub pools: Mutex>,\n}\n\nimpl DbPools {\n pub async fn get_pool(&self, conn_spec: &settings::DatabaseConnectionSpec) -> Result {\n let db_pool_cell = {\n let key = (conn_spec.url.clone(), conn_spec.user.clone());\n let mut db_pools = self.pools.lock().unwrap();\n db_pools.entry(key).or_default().clone()\n };\n let pool = db_pool_cell\n .get_or_try_init(|| async move {\n let mut pg_options: PgConnectOptions = conn_spec.url.parse()?;\n if let Some(user) = &conn_spec.user {\n pg_options = pg_options.username(user);\n }\n if let Some(password) = &conn_spec.password {\n pg_options = pg_options.password(password);\n }\n let pool = PgPool::connect_with(pg_options)\n .await\n .context(\"Failed to connect to database\")?;\n anyhow::Ok(pool)\n })\n .await?;\n Ok(pool.clone())\n }\n}\n\npub struct LibSetupContext {\n pub all_setup_states: setup::AllSetupStates,\n pub global_setup_status: setup::GlobalSetupStatus,\n}\npub struct PersistenceContext {\n pub builtin_db_pool: PgPool,\n pub setup_ctx: tokio::sync::RwLock,\n}\n\npub struct LibContext {\n pub db_pools: DbPools,\n pub persistence_ctx: Option,\n pub flows: Mutex>>,\n\n pub global_concurrency_controller: Arc,\n}\n\nimpl LibContext {\n pub fn get_flow_context(&self, flow_name: &str) -> Result> {\n let flows = self.flows.lock().unwrap();\n let flow_ctx = flows\n .get(flow_name)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"Flow instance not found: {flow_name}\"),\n StatusCode::NOT_FOUND,\n )\n })?\n .clone();\n Ok(flow_ctx)\n }\n\n pub fn 
remove_flow_context(&self, flow_name: &str) {\n let mut flows = self.flows.lock().unwrap();\n flows.remove(flow_name);\n }\n\n pub fn require_persistence_ctx(&self) -> Result<&PersistenceContext> {\n self.persistence_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Database is required for this operation. Please set COCOINDEX_DATABASE_URL environment variable and call cocoindex.init() with database settings.\"))\n }\n\n pub fn require_builtin_db_pool(&self) -> Result<&PgPool> {\n Ok(&self.require_persistence_ctx()?.builtin_db_pool)\n }\n}\n\npub fn get_runtime() -> &'static Runtime {\n &TOKIO_RUNTIME\n}\n\npub fn get_auth_registry() -> &'static Arc {\n &AUTH_REGISTRY\n}\n\nstatic LIB_INIT: OnceLock<()> = OnceLock::new();\npub fn create_lib_context(settings: settings::Settings) -> Result {\n LIB_INIT.get_or_init(|| {\n let _ = env_logger::try_init();\n\n pyo3_async_runtimes::tokio::init_with_runtime(get_runtime()).unwrap();\n\n let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();\n });\n\n let db_pools = DbPools::default();\n let persistence_ctx = if let Some(database_spec) = &settings.database {\n let (pool, all_setup_states) = get_runtime().block_on(async {\n let pool = db_pools.get_pool(database_spec).await?;\n let existing_ss = setup::get_existing_setup_state(&pool).await?;\n anyhow::Ok((pool, existing_ss))\n })?;\n Some(PersistenceContext {\n builtin_db_pool: pool,\n setup_ctx: tokio::sync::RwLock::new(LibSetupContext {\n global_setup_status: setup::GlobalSetupStatus::from_setup_states(&all_setup_states),\n all_setup_states,\n }),\n })\n } else {\n // No database configured\n None\n };\n\n Ok(LibContext {\n db_pools,\n persistence_ctx,\n flows: Mutex::new(BTreeMap::new()),\n global_concurrency_controller: Arc::new(concur_control::ConcurrencyController::new(\n &concur_control::Options {\n max_inflight_rows: settings.global_execution_options.source_max_inflight_rows,\n max_inflight_bytes: 
settings.global_execution_options.source_max_inflight_bytes,\n },\n )),\n })\n}\n\npub static LIB_CONTEXT: RwLock>> = RwLock::new(None);\n\npub(crate) fn init_lib_context(settings: settings::Settings) -> Result<()> {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = Some(Arc::new(create_lib_context(settings)?));\n Ok(())\n}\n\npub(crate) fn get_lib_context() -> Result> {\n let lib_context_locked = LIB_CONTEXT.read().unwrap();\n lib_context_locked\n .as_ref()\n .cloned()\n .ok_or_else(|| anyhow!(\"CocoIndex library is not initialized or already stopped\"))\n}\n\npub(crate) fn clear_lib_context() {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = None;\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_db_pools_default() {\n let db_pools = DbPools::default();\n assert!(db_pools.pools.lock().unwrap().is_empty());\n }\n\n #[test]\n fn test_lib_context_without_database() {\n let lib_context = create_lib_context(settings::Settings::default()).unwrap();\n assert!(lib_context.persistence_ctx.is_none());\n assert!(lib_context.require_builtin_db_pool().is_err());\n }\n\n #[test]\n fn test_persistence_context_type_safety() {\n // This test ensures that PersistenceContext groups related fields together\n let settings = settings::Settings {\n database: Some(settings::DatabaseConnectionSpec {\n url: \"postgresql://test\".to_string(),\n user: None,\n password: None,\n }),\n ..Default::default()\n };\n\n // This would fail at runtime due to invalid connection, but we're testing the structure\n let result = create_lib_context(settings);\n // We expect this to fail due to invalid connection, but the structure should be correct\n assert!(result.is_err());\n }\n}\n"], ["/cocoindex/src/setup/components.rs", "use super::{CombinedState, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::prelude::*;\nuse std::fmt::Debug;\n\npub trait State: Debug + Send + Sync {\n fn key(&self) -> 
Key;\n}\n\n#[async_trait]\npub trait SetupOperator: 'static + Send + Sync {\n type Key: Debug + Hash + Eq + Clone + Send + Sync;\n type State: State;\n type SetupState: Send + Sync + IntoIterator;\n type Context: Sync;\n\n fn describe_key(&self, key: &Self::Key) -> String;\n\n fn describe_state(&self, state: &Self::State) -> String;\n\n fn is_up_to_date(&self, current: &Self::State, desired: &Self::State) -> bool;\n\n async fn create(&self, state: &Self::State, context: &Self::Context) -> Result<()>;\n\n async fn delete(&self, key: &Self::Key, context: &Self::Context) -> Result<()>;\n\n async fn update(&self, state: &Self::State, context: &Self::Context) -> Result<()> {\n self.delete(&state.key(), context).await?;\n self.create(state, context).await\n }\n}\n\n#[derive(Debug)]\nstruct CompositeStateUpsert {\n state: S,\n already_exists: bool,\n}\n\n#[derive(Derivative)]\n#[derivative(Debug)]\npub struct SetupStatus {\n #[derivative(Debug = \"ignore\")]\n desc: D,\n keys_to_delete: IndexSet,\n states_to_upsert: Vec>,\n}\n\nimpl SetupStatus {\n pub fn create(\n desc: D,\n desired: Option,\n existing: CombinedState,\n ) -> Result {\n let existing_component_states = CombinedState {\n current: existing.current.map(|s| {\n s.into_iter()\n .map(|s| (s.key(), s))\n .collect::>()\n }),\n staging: existing\n .staging\n .into_iter()\n .map(|s| match s {\n StateChange::Delete => StateChange::Delete,\n StateChange::Upsert(s) => {\n StateChange::Upsert(s.into_iter().map(|s| (s.key(), s)).collect())\n }\n })\n .collect(),\n legacy_state_key: existing.legacy_state_key,\n };\n let mut keys_to_delete = IndexSet::new();\n let mut states_to_upsert = vec![];\n\n // Collect all existing component keys\n for c in existing_component_states.possible_versions() {\n keys_to_delete.extend(c.keys().cloned());\n }\n\n if let Some(desired_state) = desired {\n for desired_comp_state in desired_state {\n let key = desired_comp_state.key();\n\n // Remove keys that should be kept from deletion list\n 
keys_to_delete.shift_remove(&key);\n\n // Add components that need to be updated\n let is_up_to_date = existing_component_states.always_exists()\n && existing_component_states.possible_versions().all(|v| {\n v.get(&key)\n .is_some_and(|s| desc.is_up_to_date(s, &desired_comp_state))\n });\n if !is_up_to_date {\n let already_exists = existing_component_states\n .possible_versions()\n .any(|v| v.contains_key(&key));\n states_to_upsert.push(CompositeStateUpsert {\n state: desired_comp_state,\n already_exists,\n });\n }\n }\n }\n\n Ok(Self {\n desc,\n keys_to_delete,\n states_to_upsert,\n })\n }\n}\n\nimpl ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n\n for key in &self.keys_to_delete {\n result.push(setup::ChangeDescription::Action(format!(\n \"Delete {}\",\n self.desc.describe_key(key)\n )));\n }\n\n for state in &self.states_to_upsert {\n result.push(setup::ChangeDescription::Action(format!(\n \"{} {}\",\n if state.already_exists {\n \"Update\"\n } else {\n \"Create\"\n },\n self.desc.describe_state(&state.state)\n )));\n }\n\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.keys_to_delete.is_empty() && self.states_to_upsert.is_empty() {\n SetupChangeType::NoChange\n } else if self.keys_to_delete.is_empty() {\n SetupChangeType::Create\n } else if self.states_to_upsert.is_empty() {\n SetupChangeType::Delete\n } else {\n SetupChangeType::Update\n }\n }\n}\n\npub async fn apply_component_changes(\n changes: Vec<&SetupStatus>,\n context: &D::Context,\n) -> Result<()> {\n // First delete components that need to be removed\n for change in changes.iter() {\n for key in &change.keys_to_delete {\n change.desc.delete(key, context).await?;\n }\n }\n\n // Then upsert components that need to be updated\n for change in changes.iter() {\n for state in &change.states_to_upsert {\n if state.already_exists {\n change.desc.update(&state.state, context).await?;\n } else {\n change.desc.create(&state.state, 
context).await?;\n }\n }\n }\n\n Ok(())\n}\n\nimpl ResourceSetupStatus for (A, B) {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n result.extend(self.0.describe_changes());\n result.extend(self.1.describe_changes());\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.0.change_type(), self.1.change_type()) {\n (SetupChangeType::Invalid, _) | (_, SetupChangeType::Invalid) => {\n SetupChangeType::Invalid\n }\n (SetupChangeType::NoChange, b) => b,\n (a, _) => a,\n }\n }\n}\n"], ["/cocoindex/src/llm/openai.rs", "use crate::api_bail;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient, detect_image_mime_type};\nuse anyhow::Result;\nuse async_openai::{\n Client as OpenAIClient,\n config::OpenAIConfig,\n types::{\n ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartImage,\n ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage,\n ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,\n ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,\n CreateChatCompletionRequest, CreateEmbeddingRequest, EmbeddingInput, ImageDetail,\n ResponseFormat, ResponseFormatJsonSchema,\n },\n};\nuse async_trait::async_trait;\nuse base64::prelude::*;\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n \"text-embedding-3-small\" => 1536,\n \"text-embedding-3-large\" => 3072,\n \"text-embedding-ada-002\" => 1536,\n};\n\npub struct Client {\n client: async_openai::Client,\n}\n\nimpl Client {\n pub(crate) fn from_parts(client: async_openai::Client) -> Self {\n Self { client }\n }\n\n pub fn new(address: Option) -> Result {\n if let Some(address) = address {\n api_bail!(\"OpenAI doesn't support custom API address: {address}\");\n }\n // Verify API key is set\n if std::env::var(\"OPENAI_API_KEY\").is_err() {\n api_bail!(\"OPENAI_API_KEY environment variable must be set\");\n }\n Ok(Self {\n // OpenAI client will use OPENAI_API_KEY env variable by default\n client: OpenAIClient::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let mut messages = Vec::new();\n\n // Add system prompt if provided\n if let Some(system) = request.system_prompt {\n messages.push(ChatCompletionRequestMessage::System(\n ChatCompletionRequestSystemMessage {\n content: ChatCompletionRequestSystemMessageContent::Text(system.into_owned()),\n ..Default::default()\n },\n ));\n }\n\n // Add user message\n let user_message_content = match request.image {\n Some(img_bytes) => {\n let base64_image = BASE64_STANDARD.encode(img_bytes.as_ref());\n let mime_type = detect_image_mime_type(img_bytes.as_ref())?;\n let image_url = format!(\"data:{mime_type};base64,{base64_image}\");\n ChatCompletionRequestUserMessageContent::Array(vec![\n ChatCompletionRequestUserMessageContentPart::Text(\n ChatCompletionRequestMessageContentPartText {\n text: request.user_prompt.into_owned(),\n },\n ),\n ChatCompletionRequestUserMessageContentPart::ImageUrl(\n ChatCompletionRequestMessageContentPartImage {\n image_url: async_openai::types::ImageUrl {\n url: image_url,\n detail: Some(ImageDetail::Auto),\n },\n },\n ),\n ])\n }\n None => 
ChatCompletionRequestUserMessageContent::Text(request.user_prompt.into_owned()),\n };\n messages.push(ChatCompletionRequestMessage::User(\n ChatCompletionRequestUserMessage {\n content: user_message_content,\n ..Default::default()\n },\n ));\n\n // Create the chat completion request\n let request = CreateChatCompletionRequest {\n model: request.model.to_string(),\n messages,\n response_format: match request.output_format {\n Some(super::OutputFormat::JsonSchema { name, schema }) => {\n Some(ResponseFormat::JsonSchema {\n json_schema: ResponseFormatJsonSchema {\n name: name.into_owned(),\n description: None,\n schema: Some(serde_json::to_value(&schema)?),\n strict: Some(true),\n },\n })\n }\n None => None,\n },\n ..Default::default()\n };\n\n // Send request and get response\n let response = self.client.chat().create(request).await?;\n\n // Extract the response text from the first choice\n let text = response\n .choices\n .into_iter()\n .next()\n .and_then(|choice| choice.message.content)\n .ok_or_else(|| anyhow::anyhow!(\"No response from OpenAI\"))?;\n\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: true,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let response = self\n .client\n .embeddings()\n .create(CreateEmbeddingRequest {\n model: request.model.to_string(),\n input: EmbeddingInput::String(request.text.to_string()),\n dimensions: request.output_dimension,\n ..Default::default()\n })\n .await?;\n Ok(super::LlmEmbeddingResponse {\n embedding: response\n .data\n .into_iter()\n .next()\n .ok_or_else(|| anyhow::anyhow!(\"No embedding returned from OpenAI\"))?\n .embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: 
&str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/base/schema.rs", "use crate::prelude::*;\n\nuse super::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct VectorTypeSchema {\n pub element_type: Box,\n pub dimension: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct UnionTypeSchema {\n pub types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum BasicValueType {\n /// A sequence of bytes in binary.\n Bytes,\n\n /// String encoded in UTF-8.\n Str,\n\n /// A boolean value.\n Bool,\n\n /// 64-bit integer.\n Int64,\n\n /// 32-bit floating point number.\n Float32,\n\n /// 64-bit floating point number.\n Float64,\n\n /// A range, with a start offset and a length.\n Range,\n\n /// A UUID.\n Uuid,\n\n /// Date (without time within the current day).\n Date,\n\n /// Time of the day.\n Time,\n\n /// Local date and time, without timezone.\n LocalDateTime,\n\n /// Date and time with timezone.\n OffsetDateTime,\n\n /// A time duration.\n TimeDelta,\n\n /// A JSON value.\n Json,\n\n /// A vector of values (usually numbers, for embeddings).\n Vector(VectorTypeSchema),\n\n /// A union\n Union(UnionTypeSchema),\n}\n\nimpl std::fmt::Display for BasicValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n BasicValueType::Bytes => write!(f, \"Bytes\"),\n BasicValueType::Str => write!(f, \"Str\"),\n BasicValueType::Bool => write!(f, \"Bool\"),\n BasicValueType::Int64 => write!(f, \"Int64\"),\n BasicValueType::Float32 => write!(f, \"Float32\"),\n BasicValueType::Float64 => write!(f, \"Float64\"),\n BasicValueType::Range => write!(f, \"Range\"),\n BasicValueType::Uuid => write!(f, \"Uuid\"),\n BasicValueType::Date => write!(f, \"Date\"),\n BasicValueType::Time => write!(f, \"Time\"),\n BasicValueType::LocalDateTime => 
write!(f, \"LocalDateTime\"),\n BasicValueType::OffsetDateTime => write!(f, \"OffsetDateTime\"),\n BasicValueType::TimeDelta => write!(f, \"TimeDelta\"),\n BasicValueType::Json => write!(f, \"Json\"),\n BasicValueType::Vector(s) => {\n write!(f, \"Vector[{}\", s.element_type)?;\n if let Some(dimension) = s.dimension {\n write!(f, \", {dimension}\")?;\n }\n write!(f, \"]\")\n }\n BasicValueType::Union(s) => {\n write!(f, \"Union[\")?;\n for (i, typ) in s.types.iter().enumerate() {\n if i > 0 {\n // Add type delimiter\n write!(f, \" | \")?;\n }\n write!(f, \"{typ}\")?;\n }\n write!(f, \"]\")\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct StructSchema {\n pub fields: Arc>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n}\n\nimpl StructSchema {\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: Arc::new(self.fields.iter().map(|f| f.without_attrs()).collect()),\n description: None,\n }\n }\n}\n\nimpl std::fmt::Display for StructSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Struct(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\n#[allow(clippy::enum_variant_names)]\npub enum TableKind {\n /// An table with unordered rows, without key.\n UTable,\n /// A table's first field is the key.\n #[serde(alias = \"Table\")]\n KTable,\n /// A table whose rows orders are preserved.\n #[serde(alias = \"List\")]\n LTable,\n}\n\nimpl std::fmt::Display for TableKind {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n TableKind::UTable => write!(f, \"Table\"),\n TableKind::KTable => write!(f, \"KTable\"),\n TableKind::LTable => write!(f, \"LTable\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, 
Eq)]\npub struct TableSchema {\n pub kind: TableKind,\n pub row: StructSchema,\n}\n\nimpl TableSchema {\n pub fn has_key(&self) -> bool {\n match self.kind {\n TableKind::KTable => true,\n TableKind::UTable | TableKind::LTable => false,\n }\n }\n\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self.kind {\n TableKind::KTable => self\n .row\n .fields\n .first()\n .as_ref()\n .map(|field| &field.value_type),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n kind: self.kind,\n row: self.row.without_attrs(),\n }\n }\n}\n\nimpl std::fmt::Display for TableSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}({})\", self.kind, self.row)\n }\n}\n\nimpl TableSchema {\n pub fn new(kind: TableKind, row: StructSchema) -> Self {\n Self { kind, row }\n }\n\n pub fn key_field(&self) -> Option<&FieldSchema> {\n match self.kind {\n TableKind::KTable => Some(self.row.fields.first().unwrap()),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum ValueType {\n Struct(StructSchema),\n\n #[serde(untagged)]\n Basic(BasicValueType),\n\n #[serde(untagged)]\n Table(TableSchema),\n}\n\nimpl ValueType {\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self {\n ValueType::Basic(_) => None,\n ValueType::Struct(_) => None,\n ValueType::Table(c) => c.key_type(),\n }\n }\n\n // Type equality, ignoring attributes.\n pub fn without_attrs(&self) -> Self {\n match self {\n ValueType::Basic(a) => ValueType::Basic(a.clone()),\n ValueType::Struct(a) => ValueType::Struct(a.without_attrs()),\n ValueType::Table(a) => ValueType::Table(a.without_attrs()),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct EnrichedValueType {\n #[serde(rename = \"type\")]\n pub typ: DataType,\n\n #[serde(default, skip_serializing_if = 
\"std::ops::Not::not\")]\n pub nullable: bool,\n\n #[serde(default, skip_serializing_if = \"BTreeMap::is_empty\")]\n pub attrs: Arc>,\n}\n\nimpl EnrichedValueType {\n pub fn without_attrs(&self) -> Self {\n Self {\n typ: self.typ.without_attrs(),\n nullable: self.nullable,\n attrs: Default::default(),\n }\n }\n}\n\nimpl EnrichedValueType {\n pub fn from_alternative(\n value_type: &EnrichedValueType,\n ) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n typ: (&value_type.typ).try_into()?,\n nullable: value_type.nullable,\n attrs: value_type.attrs.clone(),\n })\n }\n\n pub fn with_attr(mut self, key: &str, value: serde_json::Value) -> Self {\n Arc::make_mut(&mut self.attrs).insert(key.to_string(), value);\n self\n }\n}\n\nimpl std::fmt::Display for EnrichedValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.typ)?;\n if self.nullable {\n write!(f, \"?\")?;\n }\n if !self.attrs.is_empty() {\n write!(\n f,\n \" [{}]\",\n self.attrs\n .iter()\n .map(|(k, v)| format!(\"{k}: {v}\"))\n .collect::>()\n .join(\", \")\n )?;\n }\n Ok(())\n }\n}\n\nimpl std::fmt::Display for ValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ValueType::Basic(b) => write!(f, \"{b}\"),\n ValueType::Struct(s) => write!(f, \"{s}\"),\n ValueType::Table(c) => write!(f, \"{c}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct FieldSchema {\n /// ID is used to identify the field in the schema.\n pub name: FieldName,\n\n #[serde(flatten)]\n pub value_type: EnrichedValueType,\n}\n\nimpl FieldSchema {\n pub fn new(name: impl ToString, value_type: EnrichedValueType) -> Self {\n Self {\n name: name.to_string(),\n value_type,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n name: self.name.clone(),\n value_type: self.value_type.without_attrs(),\n }\n }\n}\n\nimpl FieldSchema {\n pub fn from_alternative(field: &FieldSchema) -> 
Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n name: field.name.clone(),\n value_type: EnrichedValueType::from_alternative(&field.value_type)?,\n })\n }\n}\n\nimpl std::fmt::Display for FieldSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.name, self.value_type)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct CollectorSchema {\n pub fields: Vec,\n /// If specified, the collector will have an automatically generated UUID field with the given index.\n pub auto_uuid_field_idx: Option,\n}\n\nimpl std::fmt::Display for CollectorSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Collector(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\nimpl CollectorSchema {\n pub fn from_fields(fields: Vec, auto_uuid_field: Option) -> Self {\n let mut fields = fields;\n let auto_uuid_field_idx = if let Some(auto_uuid_field) = auto_uuid_field {\n fields.insert(\n 0,\n FieldSchema::new(\n auto_uuid_field,\n EnrichedValueType {\n typ: ValueType::Basic(BasicValueType::Uuid),\n nullable: false,\n attrs: Default::default(),\n },\n ),\n );\n Some(0)\n } else {\n None\n };\n Self {\n fields,\n auto_uuid_field_idx,\n }\n }\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: self.fields.iter().map(|f| f.without_attrs()).collect(),\n auto_uuid_field_idx: self.auto_uuid_field_idx,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct OpScopeSchema {\n /// Output schema for ops with output.\n pub op_output_types: HashMap,\n\n /// Child op scope for foreach ops.\n pub op_scopes: HashMap>,\n\n /// Collectors for the current scope.\n pub collectors: Vec>>,\n}\n\n/// Top-level schema for a flow instance.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowSchema {\n pub schema: 
StructSchema,\n\n pub root_op_scope: OpScopeSchema,\n}\n\nimpl std::ops::Deref for FlowSchema {\n type Target = StructSchema;\n\n fn deref(&self) -> &Self::Target {\n &self.schema\n }\n}\n\npub struct OpArgSchema {\n pub name: OpArgName,\n pub value_type: EnrichedValueType,\n pub analyzed_value: AnalyzedValueMapping,\n}\n"], ["/cocoindex/src/service/flows.rs", "use crate::prelude::*;\n\nuse crate::execution::{evaluator, indexing_status, memoization, row_indexer, stats};\nuse crate::lib_context::LibContext;\nuse crate::{base::schema::FlowSchema, ops::interface::SourceExecutorListOptions};\nuse axum::{\n Json,\n extract::{Path, State},\n http::StatusCode,\n};\nuse axum_extra::extract::Query;\n\npub async fn list_flows(\n State(lib_context): State>,\n) -> Result>, ApiError> {\n Ok(Json(\n lib_context.flows.lock().unwrap().keys().cloned().collect(),\n ))\n}\n\npub async fn get_flow_schema(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(flow_ctx.flow.data_schema.clone()))\n}\n\n#[derive(Serialize)]\npub struct GetFlowResponse {\n flow_spec: spec::FlowInstanceSpec,\n data_schema: FlowSchema,\n fingerprint: utils::fingerprint::Fingerprint,\n}\n\npub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}\n\n#[derive(Deserialize)]\npub struct GetKeysParam {\n field: String,\n}\n\n#[derive(Serialize)]\npub struct 
GetKeysResponse {\n key_type: schema::EnrichedValueType,\n keys: Vec,\n}\n\npub async fn get_keys(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let schema = &flow_ctx.flow.data_schema;\n\n let field_idx = schema\n .fields\n .iter()\n .position(|f| f.name == query.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field not found: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let key_type = schema.fields[field_idx]\n .value_type\n .typ\n .key_type()\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field has no key: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let execution_plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = execution_plan\n .import_ops\n .iter()\n .find(|op| op.output.field_idx == field_idx as u32)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field is not a source: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n let mut keys = Vec::new();\n while let Some(rows) = rows_stream.next().await {\n keys.extend(rows?.into_iter().map(|row| row.key));\n }\n Ok(Json(GetKeysResponse {\n key_type: key_type.clone(),\n keys,\n }))\n}\n\n#[derive(Deserialize)]\npub struct SourceRowKeyParams {\n field: String,\n key: Vec,\n}\n\n#[derive(Serialize)]\npub struct EvaluateDataResponse {\n schema: FlowSchema,\n data: value::ScopeValue,\n}\n\nstruct SourceRowKeyContextHolder<'a> {\n plan: Arc,\n import_op_idx: usize,\n schema: &'a FlowSchema,\n key: value::KeyValue,\n}\n\nimpl<'a> SourceRowKeyContextHolder<'a> {\n async fn create(flow_ctx: &'a FlowContext, source_row_key: SourceRowKeyParams) -> Result {\n let schema = &flow_ctx.flow.data_schema;\n let import_op_idx = flow_ctx\n .flow\n .flow_instance\n .import_ops\n .iter()\n .position(|op| op.name == source_row_key.field)\n 
.ok_or_else(|| {\n ApiError::new(\n &format!(\"source field not found: {}\", source_row_key.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[import_op_idx];\n let field_schema = &schema.fields[import_op.output.field_idx as usize];\n let table_schema = match &field_schema.value_type.typ {\n schema::ValueType::Table(table) => table,\n _ => api_bail!(\"field is not a table: {}\", source_row_key.field),\n };\n let key_field = table_schema\n .key_field()\n .ok_or_else(|| api_error!(\"field {} does not have a key\", source_row_key.field))?;\n let key = value::KeyValue::from_strs(source_row_key.key, &key_field.value_type.typ)?;\n Ok(Self {\n plan,\n import_op_idx,\n schema,\n key,\n })\n }\n\n fn as_context<'b>(&'b self) -> evaluator::SourceRowEvaluationContext<'b> {\n evaluator::SourceRowEvaluationContext {\n plan: &self.plan,\n import_op: &self.plan.import_ops[self.import_op_idx],\n schema: self.schema,\n key: &self.key,\n import_op_idx: self.import_op_idx,\n }\n }\n}\n\npub async fn evaluate_data(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let evaluate_output = row_indexer::evaluate_source_entry_with_memory(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n memoization::EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: true,\n },\n lib_context.require_builtin_db_pool()?,\n )\n .await?\n .ok_or_else(|| {\n api_error!(\n \"value not found for source at the specified key: {key:?}\",\n key = source_row_key_ctx.key\n )\n })?;\n\n Ok(Json(EvaluateDataResponse {\n schema: flow_ctx.flow.data_schema.clone(),\n data: evaluate_output.data_scope.into(),\n }))\n}\n\npub async fn 
update(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow_ctx.clone(),\n lib_context.require_builtin_db_pool()?,\n execution::FlowLiveUpdaterOptions {\n live_mode: false,\n ..Default::default()\n },\n )\n .await?;\n live_updater.wait().await?;\n Ok(Json(live_updater.index_update_info()))\n}\n\npub async fn get_row_indexing_status(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let indexing_status = indexing_status::get_source_row_indexing_status(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n lib_context.require_builtin_db_pool()?,\n )\n .await?;\n Ok(Json(indexing_status))\n}\n"], ["/cocoindex/src/llm/ollama.rs", "use crate::prelude::*;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient};\nuse schemars::schema::SchemaObject;\nuse serde_with::{base64::Base64, serde_as};\n\nfn get_embedding_dimension(model: &str) -> Option {\n match model.to_ascii_lowercase().as_str() {\n \"mxbai-embed-large\"\n | \"bge-m3\"\n | \"bge-large\"\n | \"snowflake-arctic-embed\"\n | \"snowflake-arctic-embed2\" => Some(1024),\n\n \"nomic-embed-text\"\n | \"paraphrase-multilingual\"\n | \"snowflake-arctic-embed:110m\"\n | \"snowflake-arctic-embed:137m\"\n | \"granite-embedding:278m\" => Some(768),\n\n \"all-minilm\"\n | \"snowflake-arctic-embed:22m\"\n | \"snowflake-arctic-embed:33m\"\n | \"granite-embedding\" => Some(384),\n\n _ => None,\n }\n}\n\npub struct Client {\n generate_url: String,\n embed_url: String,\n reqwest_client: reqwest::Client,\n}\n\n#[derive(Debug, Serialize)]\nenum OllamaFormat<'a> {\n #[serde(untagged)]\n JsonSchema(&'a 
SchemaObject),\n}\n\n#[serde_as]\n#[derive(Debug, Serialize)]\nstruct OllamaRequest<'a> {\n pub model: &'a str,\n pub prompt: &'a str,\n #[serde_as(as = \"Option>\")]\n pub images: Option>,\n pub format: Option>,\n pub system: Option<&'a str>,\n pub stream: Option,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaResponse {\n pub response: String,\n}\n\n#[derive(Debug, Serialize)]\nstruct OllamaEmbeddingRequest<'a> {\n pub model: &'a str,\n pub input: &'a str,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaEmbeddingResponse {\n pub embedding: Vec,\n}\n\nconst OLLAMA_DEFAULT_ADDRESS: &str = \"http://localhost:11434\";\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n let address = match &address {\n Some(addr) => addr.trim_end_matches('/'),\n None => OLLAMA_DEFAULT_ADDRESS,\n };\n Ok(Self {\n generate_url: format!(\"{address}/api/generate\"),\n embed_url: format!(\"{address}/api/embed\"),\n reqwest_client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let req = OllamaRequest {\n model: request.model,\n prompt: request.user_prompt.as_ref(),\n images: request.image.as_deref().map(|img| vec![img]),\n format: request.output_format.as_ref().map(\n |super::OutputFormat::JsonSchema { schema, .. 
}| {\n OllamaFormat::JsonSchema(schema.as_ref())\n },\n ),\n system: request.system_prompt.as_ref().map(|s| s.as_ref()),\n stream: Some(false),\n };\n let res = retryable::run(\n || {\n self.reqwest_client\n .post(self.generate_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !res.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n res.status(),\n res.text().await?\n );\n }\n let json: OllamaResponse = res.json().await?;\n Ok(super::LlmGenerateResponse {\n text: json.response,\n })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: true,\n extract_descriptions: true,\n top_level_must_be_object: false,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let req = OllamaEmbeddingRequest {\n model: request.model,\n input: request.text.as_ref(),\n };\n let resp = retryable::run(\n || {\n self.reqwest_client\n .post(self.embed_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: OllamaEmbeddingResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/setup/db_metadata.rs", "use crate::prelude::*;\n\nuse super::{ResourceSetupInfo, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::utils::db::WriteAction;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\n\nconst SETUP_METADATA_TABLE_NAME: &str = \"cocoindex_setup_metadata\";\npub const FLOW_VERSION_RESOURCE_TYPE: &str = 
\"__FlowVersion\";\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SetupMetadataRecord {\n pub flow_name: String,\n // e.g. \"Flow\", \"SourceTracking\", \"Target:{TargetType}\"\n pub resource_type: String,\n pub key: serde_json::Value,\n pub state: Option,\n pub staging_changes: sqlx::types::Json>>,\n}\n\npub fn parse_flow_version(state: &Option) -> Option {\n match state {\n Some(serde_json::Value::Number(n)) => n.as_u64(),\n _ => None,\n }\n}\n\n/// Returns None if metadata table doesn't exist.\npub async fn read_setup_metadata(pool: &PgPool) -> Result>> {\n let mut db_conn = pool.acquire().await?;\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME}\",\n );\n let metadata = sqlx::query_as(&query_str).fetch_all(&mut *db_conn).await;\n let result = match metadata {\n Ok(metadata) => Some(metadata),\n Err(err) => {\n let exists: Option = sqlx::query_scalar(\n \"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = $1)\",\n )\n .bind(SETUP_METADATA_TABLE_NAME)\n .fetch_one(&mut *db_conn)\n .await?;\n if !exists.unwrap_or(false) {\n None\n } else {\n return Err(err.into());\n }\n }\n };\n Ok(result)\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ResourceTypeKey {\n pub resource_type: String,\n pub key: serde_json::Value,\n}\n\nimpl ResourceTypeKey {\n pub fn new(resource_type: String, key: serde_json::Value) -> Self {\n Self { resource_type, key }\n }\n}\n\nstatic VERSION_RESOURCE_TYPE_ID: LazyLock = LazyLock::new(|| ResourceTypeKey {\n resource_type: FLOW_VERSION_RESOURCE_TYPE.to_string(),\n key: serde_json::Value::Null,\n});\n\nasync fn read_metadata_records_for_flow(\n flow_name: &str,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1\",\n );\n let metadata: Vec = 
sqlx::query_as(&query_str)\n .bind(flow_name)\n .fetch_all(db_executor)\n .await?;\n let result = metadata\n .into_iter()\n .map(|m| {\n (\n ResourceTypeKey {\n resource_type: m.resource_type.clone(),\n key: m.key.clone(),\n },\n m,\n )\n })\n .collect();\n Ok(result)\n}\n\nasync fn read_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT state FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n let state: Option = sqlx::query_scalar(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .fetch_optional(db_executor)\n .await?;\n Ok(state)\n}\n\nasync fn upsert_staging_changes(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n staging_changes: Vec>,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, staging_changes) VALUES ($1, $2, $3, $4)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET staging_changes = $4 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(staging_changes))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn upsert_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n state: &serde_json::Value,\n action: WriteAction,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, state, staging_changes) VALUES ($1, $2, $3, $4, $5)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE 
{SETUP_METADATA_TABLE_NAME} SET state = $4, staging_changes = $5 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(state))\n .bind(sqlx::types::Json(Vec::::new()))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn delete_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub struct StateUpdateInfo {\n pub desired_state: Option,\n pub legacy_key: Option,\n}\n\nimpl StateUpdateInfo {\n pub fn new(\n desired_state: Option<&impl Serialize>,\n legacy_key: Option,\n ) -> Result {\n Ok(Self {\n desired_state: desired_state\n .as_ref()\n .map(serde_json::to_value)\n .transpose()?,\n legacy_key,\n })\n }\n}\n\npub async fn stage_changes_for_flow(\n flow_name: &str,\n seen_metadata_version: Option,\n resource_update_info: &HashMap,\n pool: &PgPool,\n) -> Result {\n let mut txn = pool.begin().await?;\n let mut existing_records = read_metadata_records_for_flow(flow_name, &mut *txn).await?;\n let latest_metadata_version = existing_records\n .get(&VERSION_RESOURCE_TYPE_ID)\n .and_then(|m| parse_flow_version(&m.state));\n if seen_metadata_version < latest_metadata_version {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n let new_metadata_version = seen_metadata_version.unwrap_or_default() + 1;\n upsert_state(\n flow_name,\n &VERSION_RESOURCE_TYPE_ID,\n &serde_json::Value::Number(new_metadata_version.into()),\n if latest_metadata_version.is_some() {\n WriteAction::Update\n } else {\n 
WriteAction::Insert\n },\n &mut *txn,\n )\n .await?;\n\n for (type_id, update_info) in resource_update_info {\n let existing = existing_records.remove(type_id);\n let change = match &update_info.desired_state {\n Some(desired_state) => StateChange::Upsert(desired_state.clone()),\n None => StateChange::Delete,\n };\n let mut new_staging_changes = vec![];\n if let Some(legacy_key) = &update_info.legacy_key {\n if let Some(legacy_record) = existing_records.remove(legacy_key) {\n new_staging_changes.extend(legacy_record.staging_changes.0);\n delete_state(flow_name, legacy_key, &mut *txn).await?;\n }\n }\n let (action, existing_staging_changes) = match existing {\n Some(existing) => {\n let existing_staging_changes = existing.staging_changes.0;\n if existing_staging_changes.iter().all(|c| c != &change) {\n new_staging_changes.push(change);\n }\n (WriteAction::Update, existing_staging_changes)\n }\n None => {\n if update_info.desired_state.is_some() {\n new_staging_changes.push(change);\n }\n (WriteAction::Insert, vec![])\n }\n };\n if !new_staging_changes.is_empty() {\n upsert_staging_changes(\n flow_name,\n type_id,\n [existing_staging_changes, new_staging_changes].concat(),\n &mut *txn,\n action,\n )\n .await?;\n }\n }\n txn.commit().await?;\n Ok(new_metadata_version)\n}\n\npub async fn commit_changes_for_flow(\n flow_name: &str,\n curr_metadata_version: u64,\n state_updates: &HashMap,\n delete_version: bool,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n let latest_metadata_version =\n parse_flow_version(&read_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?);\n if latest_metadata_version != Some(curr_metadata_version) {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n for (type_id, update_info) in state_updates.iter() {\n match &update_info.desired_state {\n Some(desired_state) => {\n upsert_state(\n flow_name,\n type_id,\n desired_state,\n WriteAction::Update,\n 
&mut *txn,\n )\n .await?;\n }\n None => {\n delete_state(flow_name, type_id, &mut *txn).await?;\n }\n }\n }\n if delete_version {\n delete_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?;\n }\n txn.commit().await?;\n Ok(())\n}\n\n#[derive(Debug)]\npub struct MetadataTableSetup {\n pub metadata_table_missing: bool,\n}\n\nimpl MetadataTableSetup {\n pub fn into_setup_info(self) -> ResourceSetupInfo<(), (), MetadataTableSetup> {\n ResourceSetupInfo {\n key: (),\n state: None,\n description: \"CocoIndex Metadata Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for MetadataTableSetup {\n fn describe_changes(&self) -> Vec {\n if self.metadata_table_missing {\n vec![setup::ChangeDescription::Action(format!(\n \"Create the cocoindex metadata table {SETUP_METADATA_TABLE_NAME}\"\n ))]\n } else {\n vec![]\n }\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.metadata_table_missing {\n SetupChangeType::Create\n } else {\n SetupChangeType::NoChange\n }\n }\n}\n\nimpl MetadataTableSetup {\n pub async fn apply_change(&self) -> Result<()> {\n if !self.metadata_table_missing {\n return Ok(());\n }\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n let query_str = format!(\n \"CREATE TABLE IF NOT EXISTS {SETUP_METADATA_TABLE_NAME} (\n flow_name TEXT NOT NULL,\n resource_type TEXT NOT NULL,\n key JSONB NOT NULL,\n state JSONB,\n staging_changes JSONB NOT NULL,\n\n PRIMARY KEY (flow_name, resource_type, key)\n )\n \",\n );\n sqlx::query(&query_str).execute(pool).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/parse_json.rs", "use crate::ops::sdk::*;\nuse anyhow::Result;\nuse std::collections::HashMap;\nuse std::sync::{Arc, LazyLock};\nuse unicase::UniCase;\n\npub struct Args {\n text: ResolvedOpArg,\n language: Option,\n}\n\ntype ParseFn = fn(&str) -> Result;\nstruct LanguageConfig {\n parse_fn: ParseFn,\n}\n\nfn add_language(\n output: &mut 
HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n parse_fn: ParseFn,\n) {\n let lang_config = Arc::new(LanguageConfig { parse_fn });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), lang_config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nfn parse_json(text: &str) -> Result {\n Ok(serde_json::from_str(text)?)\n}\n\nstatic PARSE_FN_BY_LANG: LazyLock, Arc>> =\n LazyLock::new(|| {\n let mut map = HashMap::new();\n add_language(&mut map, \"json\", [\".json\"], parse_json);\n map\n });\n\nstruct Executor {\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let lang_config = {\n let language = self.args.language.value(&input)?;\n language\n .optional()\n .map(|v| anyhow::Ok(v.as_str()?.as_ref()))\n .transpose()?\n .and_then(|lang| PARSE_FN_BY_LANG.get(&UniCase::new(lang)))\n };\n let parse_fn = lang_config.map(|c| c.parse_fn).unwrap_or(parse_json);\n let parsed_value = parse_fn(text)?;\n Ok(value::Value::Basic(value::BasicValue::Json(Arc::new(\n parsed_value,\n ))))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = EmptySpec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ParseJson\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a EmptySpec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let output_schema = make_output_type(BasicValueType::Json);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n _spec: 
EmptySpec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { args }))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n use serde_json::json;\n\n #[tokio::test]\n async fn test_parse_json() {\n let spec = EmptySpec {};\n\n let factory = Arc::new(Factory);\n let json_string_content = r#\"{\"city\": \"Magdeburg\"}\"#;\n let lang_value: Value = \"json\".to_string().into();\n\n let input_args_values = vec![json_string_content.to_string().into(), lang_value.clone()];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Json(arc_json_value)) => {\n let expected_json = json!({\"city\": \"Magdeburg\"});\n assert_eq!(\n *arc_json_value, expected_json,\n \"Parsed JSON value mismatch with specified language\"\n );\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Json), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/table_columns.rs", "use crate::{\n ops::sdk::SetupStateCompatibility,\n prelude::*,\n setup::{CombinedState, SetupChangeType},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TableColumnsSchema {\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"key_fields_schema\")]\n pub key_columns: IndexMap,\n\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"value_fields_schema\")]\n pub value_columns: IndexMap,\n}\n\n#[derive(Debug)]\npub enum TableUpsertionAction {\n Create {\n keys: IndexMap,\n values: IndexMap,\n },\n Update {\n columns_to_delete: IndexSet,\n columns_to_upsert: IndexMap,\n },\n}\n\nimpl TableUpsertionAction {\n pub fn is_empty(&self) -> bool 
{\n match self {\n Self::Create { .. } => false,\n Self::Update {\n columns_to_delete,\n columns_to_upsert,\n } => columns_to_delete.is_empty() && columns_to_upsert.is_empty(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct TableMainSetupAction {\n pub drop_existing: bool,\n pub table_upsertion: Option>,\n}\n\nimpl TableMainSetupAction {\n pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n\n let table_upsertion = if existing.always_exists() && !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n .flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n .collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n\n Self {\n drop_existing,\n table_upsertion: Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }\n\n pub fn describe_changes(&self) -> Vec\n where\n T: std::fmt::Display,\n 
{\n let mut descriptions = vec![];\n if self.drop_existing {\n descriptions.push(setup::ChangeDescription::Action(\"Drop table\".to_string()));\n }\n if let Some(table_upsertion) = &self.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create table:\\n key columns: {}\\n value columns: {}\\n\",\n keys.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n values.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n )));\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n if !columns_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete column from table: {}\",\n columns_to_delete.iter().join(\", \"),\n )));\n }\n if !columns_to_upsert.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Add / update columns in table: {}\",\n columns_to_upsert\n .iter()\n .map(|(k, v)| format!(\"{k} {v}\"))\n .join(\", \"),\n )));\n }\n }\n }\n }\n descriptions\n }\n\n pub fn change_type(&self, has_other_update: bool) -> SetupChangeType {\n match (self.drop_existing, &self.table_upsertion) {\n (_, Some(TableUpsertionAction::Create { .. })) => SetupChangeType::Create,\n (_, Some(TableUpsertionAction::Update { .. 
})) => SetupChangeType::Update,\n (true, None) => SetupChangeType::Delete,\n (false, None) => {\n if has_other_update {\n SetupChangeType::Update\n } else {\n SetupChangeType::NoChange\n }\n }\n }\n }\n}\n\npub fn check_table_compatibility(\n desired: &TableColumnsSchema,\n existing: &TableColumnsSchema,\n) -> SetupStateCompatibility {\n let is_key_identical = existing.key_columns == desired.key_columns;\n if is_key_identical {\n let is_value_lossy = existing\n .value_columns\n .iter()\n .any(|(k, v)| desired.value_columns.get(k) != Some(v));\n if is_value_lossy {\n SetupStateCompatibility::PartialCompatible\n } else {\n SetupStateCompatibility::Compatible\n }\n } else {\n SetupStateCompatibility::NotCompatible\n }\n}\n"], ["/cocoindex/src/builder/exec_ctx.rs", "use crate::prelude::*;\n\nuse crate::execution::db_tracking_setup;\nuse crate::ops::get_executor_factory;\nuse crate::ops::interface::SetupStateCompatibility;\n\npub struct ImportOpExecutionContext {\n pub source_id: i32,\n}\n\npub struct ExportOpExecutionContext {\n pub target_id: i32,\n}\n\npub struct FlowSetupExecutionContext {\n pub setup_state: setup::FlowSetupState,\n pub import_ops: Vec,\n pub export_ops: Vec,\n}\n\npub struct AnalyzedTargetSetupState {\n pub target_kind: String,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n pub setup_by_user: bool,\n}\n\npub struct AnalyzedSetupState {\n pub targets: Vec,\n pub declarations: Vec,\n}\n\nfn build_import_op_exec_ctx(\n import_field_name: &spec::FieldName,\n import_op_output_type: &schema::EnrichedValueType,\n existing_source_states: Option<&Vec<&setup::SourceSetupState>>,\n metadata: &mut setup::FlowSetupMetadata,\n) -> Result {\n let key_schema_no_attrs = import_op_output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key\"))?\n .typ\n .without_attrs();\n\n let existing_source_ids = existing_source_states\n .iter()\n .flat_map(|v| v.iter())\n .filter_map(|state| {\n if 
state.key_schema == key_schema_no_attrs {\n Some(state.source_id)\n } else {\n None\n }\n })\n .collect::>();\n let source_id = if existing_source_ids.len() == 1 {\n existing_source_ids.into_iter().next().unwrap()\n } else {\n if existing_source_ids.len() > 1 {\n warn!(\"Multiple source states with the same key schema found\");\n }\n metadata.last_source_id += 1;\n metadata.last_source_id\n };\n metadata.sources.insert(\n import_field_name.clone(),\n setup::SourceSetupState {\n source_id,\n key_schema: key_schema_no_attrs,\n },\n );\n Ok(ImportOpExecutionContext { source_id })\n}\n\nfn build_target_id(\n analyzed_target_ss: &AnalyzedTargetSetupState,\n existing_target_states: &HashMap<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>,\n flow_setup_state: &mut setup::FlowSetupState,\n) -> Result {\n let interface::ExecutorFactory::ExportTarget(target_factory) =\n get_executor_factory(&analyzed_target_ss.target_kind)?\n else {\n api_bail!(\n \"`{}` is not a export target op\",\n analyzed_target_ss.target_kind\n )\n };\n\n let resource_id = setup::ResourceIdentifier {\n key: analyzed_target_ss.setup_key.clone(),\n target_kind: analyzed_target_ss.target_kind.clone(),\n };\n let existing_target_states = existing_target_states.get(&resource_id);\n let mut compatible_target_ids = HashSet::>::new();\n let mut reusable_schema_version_ids = HashSet::>::new();\n for existing_state in existing_target_states.iter().flat_map(|v| v.iter()) {\n let compatibility =\n if analyzed_target_ss.setup_by_user == existing_state.common.setup_by_user {\n target_factory.check_state_compatibility(\n &analyzed_target_ss.desired_setup_state,\n &existing_state.state,\n )?\n } else {\n SetupStateCompatibility::NotCompatible\n };\n let compatible_target_id = if compatibility != SetupStateCompatibility::NotCompatible {\n reusable_schema_version_ids.insert(\n (compatibility == SetupStateCompatibility::Compatible)\n .then_some(existing_state.common.schema_version_id),\n );\n 
Some(existing_state.common.target_id)\n } else {\n None\n };\n compatible_target_ids.insert(compatible_target_id);\n }\n\n let target_id = if compatible_target_ids.len() == 1 {\n compatible_target_ids.into_iter().next().flatten()\n } else {\n if compatible_target_ids.len() > 1 {\n warn!(\"Multiple target states with the same key schema found\");\n }\n None\n };\n let target_id = target_id.unwrap_or_else(|| {\n flow_setup_state.metadata.last_target_id += 1;\n flow_setup_state.metadata.last_target_id\n });\n let max_schema_version_id = existing_target_states\n .iter()\n .flat_map(|v| v.iter())\n .map(|s| s.common.max_schema_version_id)\n .max()\n .unwrap_or(0);\n let schema_version_id = if reusable_schema_version_ids.len() == 1 {\n reusable_schema_version_ids\n .into_iter()\n .next()\n .unwrap()\n .unwrap_or(max_schema_version_id + 1)\n } else {\n max_schema_version_id + 1\n };\n match flow_setup_state.targets.entry(resource_id) {\n indexmap::map::Entry::Occupied(entry) => {\n api_bail!(\n \"Target resource already exists: kind = {}, key = {}\",\n entry.key().target_kind,\n entry.key().key\n );\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(setup::TargetSetupState {\n common: setup::TargetSetupStateCommon {\n target_id,\n schema_version_id,\n max_schema_version_id: max_schema_version_id.max(schema_version_id),\n setup_by_user: analyzed_target_ss.setup_by_user,\n },\n state: analyzed_target_ss.desired_setup_state.clone(),\n });\n }\n }\n Ok(target_id)\n}\n\npub fn build_flow_setup_execution_context(\n flow_inst: &spec::FlowInstanceSpec,\n data_schema: &schema::FlowSchema,\n analyzed_ss: &AnalyzedSetupState,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result {\n let existing_metadata_versions = || {\n existing_flow_ss\n .iter()\n .flat_map(|flow_ss| flow_ss.metadata.possible_versions())\n };\n\n let mut source_states_by_name = HashMap::<&str, Vec<&setup::SourceSetupState>>::new();\n for metadata_version in existing_metadata_versions() {\n 
for (source_name, state) in metadata_version.sources.iter() {\n source_states_by_name\n .entry(source_name.as_str())\n .or_default()\n .push(state);\n }\n }\n\n let mut target_states_by_name_type =\n HashMap::<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>::new();\n for metadata_version in existing_flow_ss.iter() {\n for (resource_id, target) in metadata_version.targets.iter() {\n target_states_by_name_type\n .entry(resource_id)\n .or_default()\n .extend(target.possible_versions());\n }\n }\n\n let mut setup_state = setup::FlowSetupState:: {\n seen_flow_metadata_version: existing_flow_ss\n .and_then(|flow_ss| flow_ss.seen_flow_metadata_version),\n metadata: setup::FlowSetupMetadata {\n last_source_id: existing_metadata_versions()\n .map(|metadata| metadata.last_source_id)\n .max()\n .unwrap_or(0),\n last_target_id: existing_metadata_versions()\n .map(|metadata| metadata.last_target_id)\n .max()\n .unwrap_or(0),\n sources: BTreeMap::new(),\n },\n tracking_table: db_tracking_setup::TrackingTableSetupState {\n table_name: existing_flow_ss\n .and_then(|flow_ss| {\n flow_ss\n .tracking_table\n .current\n .as_ref()\n .map(|v| v.table_name.clone())\n })\n .unwrap_or_else(|| db_tracking_setup::default_tracking_table_name(&flow_inst.name)),\n version_id: db_tracking_setup::CURRENT_TRACKING_TABLE_VERSION,\n },\n targets: IndexMap::new(),\n };\n\n let import_op_exec_ctx = flow_inst\n .import_ops\n .iter()\n .map(|import_op| {\n let output_type = data_schema\n .root_op_scope\n .op_output_types\n .get(&import_op.name)\n .ok_or_else(invariance_violation)?;\n build_import_op_exec_ctx(\n &import_op.name,\n output_type,\n source_states_by_name.get(&import_op.name.as_str()),\n &mut setup_state.metadata,\n )\n })\n .collect::>>()?;\n\n let export_op_exec_ctx = analyzed_ss\n .targets\n .iter()\n .map(|analyzed_target_ss| {\n let target_id = build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n Ok(ExportOpExecutionContext { 
target_id })\n })\n .collect::>>()?;\n\n for analyzed_target_ss in analyzed_ss.declarations.iter() {\n build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n }\n\n Ok(FlowSetupExecutionContext {\n setup_state,\n import_ops: import_op_exec_ctx,\n export_ops: export_op_exec_ctx,\n })\n}\n"], ["/cocoindex/src/execution/db_tracking_setup.rs", "use crate::prelude::*;\n\nuse crate::setup::{CombinedState, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType};\nuse serde::{Deserialize, Serialize};\nuse sqlx::PgPool;\n\npub fn default_tracking_table_name(flow_name: &str) -> String {\n format!(\n \"{}__cocoindex_tracking\",\n utils::db::sanitize_identifier(flow_name)\n )\n}\n\npub const CURRENT_TRACKING_TABLE_VERSION: i32 = 1;\n\nasync fn upgrade_tracking_table(\n pool: &PgPool,\n table_name: &str,\n existing_version_id: i32,\n target_version_id: i32,\n) -> Result<()> {\n if existing_version_id < 1 && target_version_id >= 1 {\n let query = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} (\n source_id INTEGER NOT NULL,\n source_key JSONB NOT NULL,\n\n -- Update in the precommit phase: after evaluation done, before really applying the changes to the target storage.\n max_process_ordinal BIGINT NOT NULL,\n staging_target_keys JSONB NOT NULL,\n memoization_info JSONB,\n\n -- Update after applying the changes to the target storage.\n processed_source_ordinal BIGINT,\n process_logic_fingerprint BYTEA,\n process_ordinal BIGINT,\n process_time_micros BIGINT,\n target_keys JSONB,\n\n PRIMARY KEY (source_id, source_key)\n );\",\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TrackingTableSetupState {\n pub table_name: String,\n pub version_id: i32,\n}\n\n#[derive(Debug)]\npub struct TrackingTableSetupStatus {\n pub desired_state: Option,\n\n pub legacy_table_names: Vec,\n\n pub min_existing_version_id: Option,\n pub source_ids_to_delete: 
Vec,\n}\n\nimpl TrackingTableSetupStatus {\n pub fn new(\n desired: Option<&TrackingTableSetupState>,\n existing: &CombinedState,\n source_ids_to_delete: Vec,\n ) -> Option {\n let legacy_table_names = existing\n .legacy_values(desired, |v| &v.table_name)\n .into_iter()\n .cloned()\n .collect();\n let min_existing_version_id = existing\n .always_exists()\n .then(|| existing.possible_versions().map(|v| v.version_id).min())\n .flatten();\n if desired.is_some() || min_existing_version_id.is_some() {\n Some(Self {\n desired_state: desired.cloned(),\n legacy_table_names,\n min_existing_version_id,\n source_ids_to_delete,\n })\n } else {\n None\n }\n }\n\n pub fn into_setup_info(\n self,\n ) -> ResourceSetupInfo<(), TrackingTableSetupState, TrackingTableSetupStatus> {\n ResourceSetupInfo {\n key: (),\n state: self.desired_state.clone(),\n description: \"Tracking Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for TrackingTableSetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. 
\",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. \",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(_)) => SetupChangeType::Create,\n (Some(min_version_id), Some(desired)) => {\n if min_version_id == desired.version_id && self.legacy_table_names.is_empty() {\n SetupChangeType::NoChange\n } else if min_version_id < desired.version_id {\n SetupChangeType::Update\n } else {\n SetupChangeType::Invalid\n }\n }\n (Some(_), None) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n }\n }\n}\n\nimpl TrackingTableSetupStatus {\n pub async fn apply_change(&self) -> Result<()> {\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n if let Some(desired) = &self.desired_state {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\n \"ALTER TABLE IF EXISTS {} RENAME TO {}\",\n lagacy_name, desired.table_name\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n if self.min_existing_version_id != Some(desired.version_id) {\n upgrade_tracking_table(\n pool,\n &desired.table_name,\n self.min_existing_version_id.unwrap_or(0),\n desired.version_id,\n )\n .await?;\n }\n } else {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\"DROP TABLE IF EXISTS {lagacy_name}\");\n sqlx::query(&query).execute(pool).await?;\n }\n return Ok(());\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/db_tracking.rs", "use crate::prelude::*;\n\nuse super::{db_tracking_setup::TrackingTableSetupState, memoization::StoredMemoizationInfo};\nuse crate::utils::{db::WriteAction, fingerprint::Fingerprint};\nuse futures::Stream;\nuse 
serde::de::{self, Deserializer, SeqAccess, Visitor};\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::fmt;\n\n#[derive(Debug, Clone)]\npub struct TrackedTargetKeyInfo {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n pub process_ordinal: i64,\n pub fingerprint: Option,\n}\n\nimpl Serialize for TrackedTargetKeyInfo {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(None)?;\n seq.serialize_element(&self.key)?;\n seq.serialize_element(&self.process_ordinal)?;\n seq.serialize_element(&self.fingerprint)?;\n if !self.additional_key.is_null() {\n seq.serialize_element(&self.additional_key)?;\n }\n seq.end()\n }\n}\n\nimpl<'de> serde::Deserialize<'de> for TrackedTargetKeyInfo {\n fn deserialize(deserializer: D) -> Result\n where\n D: Deserializer<'de>,\n {\n struct TrackedTargetKeyVisitor;\n\n impl<'de> Visitor<'de> for TrackedTargetKeyVisitor {\n type Value = TrackedTargetKeyInfo;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"a sequence of 3 or 4 elements for TrackedTargetKey\")\n }\n\n fn visit_seq(self, mut seq: A) -> Result\n where\n A: SeqAccess<'de>,\n {\n let target_key: serde_json::Value = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n let process_ordinal: i64 = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(1, &self))?;\n let fingerprint: Option = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(2, &self))?;\n let additional_key: Option = seq.next_element()?;\n\n Ok(TrackedTargetKeyInfo {\n key: target_key,\n process_ordinal,\n fingerprint,\n additional_key: additional_key.unwrap_or(serde_json::Value::Null),\n })\n }\n }\n\n deserializer.deserialize_seq(TrackedTargetKeyVisitor)\n }\n}\n\n/// (source_id, target_key)\npub type TrackedTargetKeyForSource = Vec<(i32, Vec)>;\n\n#[derive(sqlx::FromRow, Debug)]\npub struct 
SourceTrackingInfoForProcessing {\n pub memoization_info: Option>>,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub max_process_ordinal: Option,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_processing(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT memoization_info, processed_source_ordinal, process_logic_fingerprint, max_process_ordinal, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n\n Ok(tracking_info)\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForPrecommit {\n pub max_process_ordinal: i64,\n pub staging_target_keys: sqlx::types::Json,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_ordinal: Option,\n pub target_keys: Option>,\n}\n\npub async fn read_source_tracking_info_for_precommit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT max_process_ordinal, staging_target_keys, processed_source_ordinal, process_logic_fingerprint, process_ordinal, target_keys FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let precommit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n\n Ok(precommit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n max_process_ordinal: i64,\n staging_target_keys: TrackedTargetKeyForSource,\n memoization_info: 
Option<&StoredMemoizationInfo>,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} (source_id, source_key, max_process_ordinal, staging_target_keys, memoization_info) VALUES ($1, $2, $3, $4, $5)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET max_process_ordinal = $3, staging_target_keys = $4, memoization_info = $5 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(max_process_ordinal) // $3\n .bind(sqlx::types::Json(staging_target_keys)) // $4\n .bind(memoization_info.map(sqlx::types::Json)) // $5\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForCommit {\n pub staging_target_keys: sqlx::types::Json,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_commit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT staging_target_keys, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let commit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n Ok(commit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n staging_target_keys: TrackedTargetKeyForSource,\n processed_source_ordinal: Option,\n logic_fingerprint: &[u8],\n process_ordinal: i64,\n process_time_micros: i64,\n target_keys: TrackedTargetKeyForSource,\n db_setup: &TrackingTableSetupState,\n db_executor: 
impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} ( \\\n source_id, source_key, \\\n max_process_ordinal, staging_target_keys, \\\n processed_source_ordinal, process_logic_fingerprint, process_ordinal, process_time_micros, target_keys) \\\n VALUES ($1, $2, $6 + 1, $3, $4, $5, $6, $7, $8)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET staging_target_keys = $3, processed_source_ordinal = $4, process_logic_fingerprint = $5, process_ordinal = $6, process_time_micros = $7, target_keys = $8 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(sqlx::types::Json(staging_target_keys)) // $3\n .bind(processed_source_ordinal) // $4\n .bind(logic_fingerprint) // $5\n .bind(process_ordinal) // $6\n .bind(process_time_micros) // $7\n .bind(sqlx::types::Json(target_keys)) // $8\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub async fn delete_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct TrackedSourceKeyMetadata {\n pub source_key: serde_json::Value,\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n}\n\npub struct ListTrackedSourceKeyMetadataState {\n query_str: String,\n}\n\nimpl ListTrackedSourceKeyMetadataState {\n pub fn new() -> Self {\n Self {\n query_str: String::new(),\n }\n }\n\n pub fn list<'a>(\n &'a mut self,\n source_id: i32,\n 
db_setup: &'a TrackingTableSetupState,\n pool: &'a PgPool,\n ) -> impl Stream> + 'a {\n self.query_str = format!(\n \"SELECT source_key, processed_source_ordinal, process_logic_fingerprint FROM {} WHERE source_id = $1\",\n db_setup.table_name\n );\n sqlx::query_as(&self.query_str).bind(source_id).fetch(pool)\n }\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceLastProcessedInfo {\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_time_micros: Option,\n}\n\npub async fn read_source_last_processed_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT processed_source_ordinal, process_logic_fingerprint, process_time_micros FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let last_processed_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n Ok(last_processed_info)\n}\n\npub async fn update_source_tracking_ordinal(\n source_id: i32,\n source_key_json: &serde_json::Value,\n processed_source_ordinal: Option,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"UPDATE {} SET processed_source_ordinal = $3 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(processed_source_ordinal) // $3\n .execute(db_executor)\n .await?;\n Ok(())\n}\n"], ["/cocoindex/src/ops/functions/test_utils.rs", "use crate::builder::plan::{\n AnalyzedFieldReference, AnalyzedLocalFieldReference, AnalyzedValueMapping,\n};\nuse crate::ops::sdk::{\n AuthRegistry, BasicValueType, EnrichedValueType, FlowInstanceContext, OpArgSchema,\n OpArgsResolver, SimpleFunctionExecutor, SimpleFunctionFactoryBase, Value, make_output_type,\n};\nuse 
anyhow::Result;\nuse serde::de::DeserializeOwned;\nuse std::sync::Arc;\n\n// This function builds an argument schema for a flow function.\npub fn build_arg_schema(\n name: &str,\n value_type: BasicValueType,\n) -> (Option<&str>, EnrichedValueType) {\n (Some(name), make_output_type(value_type))\n}\n\n// This function tests a flow function by providing a spec, input argument schemas, and values.\npub async fn test_flow_function(\n factory: Arc,\n spec: S,\n input_arg_schemas: Vec<(Option<&str>, EnrichedValueType)>,\n input_arg_values: Vec,\n) -> Result\nwhere\n S: DeserializeOwned + Send + Sync + 'static,\n R: Send + Sync + 'static,\n F: SimpleFunctionFactoryBase + ?Sized,\n{\n // 1. Construct OpArgSchema\n let op_arg_schemas: Vec = input_arg_schemas\n .into_iter()\n .enumerate()\n .map(|(idx, (name, value_type))| OpArgSchema {\n name: name.map_or(crate::base::spec::OpArgName(None), |n| {\n crate::base::spec::OpArgName(Some(n.to_string()))\n }),\n value_type,\n analyzed_value: AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: AnalyzedLocalFieldReference {\n fields_idx: vec![idx as u32],\n },\n scope_up_level: 0,\n }),\n })\n .collect();\n\n // 2. Resolve Schema & Args\n let mut args_resolver = OpArgsResolver::new(&op_arg_schemas)?;\n let context = Arc::new(FlowInstanceContext {\n flow_instance_name: \"test_flow_function\".to_string(),\n auth_registry: Arc::new(AuthRegistry::default()),\n py_exec_ctx: None,\n });\n\n let (resolved_args_from_schema, _output_schema): (R, EnrichedValueType) = factory\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n\n args_resolver.done()?;\n\n // 3. Build Executor\n let executor: Box = factory\n .build_executor(spec, resolved_args_from_schema, Arc::clone(&context))\n .await?;\n\n // 4. 
Evaluate\n let result = executor.evaluate(input_arg_values).await?;\n\n Ok(result)\n}\n"], ["/cocoindex/src/utils/yaml_ser.rs", "use base64::prelude::*;\nuse serde::ser::{self, Serialize};\nuse yaml_rust2::yaml::Yaml;\n\n#[derive(Debug)]\npub struct YamlSerializerError {\n msg: String,\n}\n\nimpl std::fmt::Display for YamlSerializerError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"YamlSerializerError: {}\", self.msg)\n }\n}\n\nimpl std::error::Error for YamlSerializerError {}\n\nimpl ser::Error for YamlSerializerError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n YamlSerializerError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\npub struct YamlSerializer;\n\nimpl YamlSerializer {\n pub fn serialize(value: &T) -> Result\n where\n T: Serialize,\n {\n value.serialize(YamlSerializer)\n }\n}\n\nimpl ser::Serializer for YamlSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n type SerializeSeq = SeqSerializer;\n type SerializeTuple = SeqSerializer;\n type SerializeTupleStruct = SeqSerializer;\n type SerializeTupleVariant = VariantSeqSerializer;\n type SerializeMap = MapSerializer;\n type SerializeStruct = MapSerializer;\n type SerializeStructVariant = VariantMapSerializer;\n\n fn serialize_bool(self, v: bool) -> Result {\n Ok(Yaml::Boolean(v))\n }\n\n fn serialize_i8(self, v: i8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i16(self, v: i16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i32(self, v: i32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i64(self, v: i64) -> Result {\n Ok(Yaml::Integer(v))\n }\n\n fn serialize_u8(self, v: u8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u16(self, v: u16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u32(self, v: u32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u64(self, v: u64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn 
serialize_f32(self, v: f32) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f64(self, v: f64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_char(self, v: char) -> Result {\n Ok(Yaml::String(v.to_string()))\n }\n\n fn serialize_str(self, v: &str) -> Result {\n Ok(Yaml::String(v.to_owned()))\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result {\n let encoded = BASE64_STANDARD.encode(v);\n Ok(Yaml::String(encoded))\n }\n\n fn serialize_none(self) -> Result {\n Ok(Yaml::Null)\n }\n\n fn serialize_some(self, value: &T) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_struct(self, _name: &'static str) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result {\n Ok(Yaml::String(variant.to_owned()))\n }\n\n fn serialize_newtype_struct(\n self,\n _name: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n let mut hash = yaml_rust2::yaml::Hash::new();\n hash.insert(Yaml::String(variant.to_owned()), value.serialize(self)?);\n Ok(Yaml::Hash(hash))\n }\n\n fn serialize_seq(self, len: Option) -> Result {\n Ok(SeqSerializer {\n vec: Vec::with_capacity(len.unwrap_or(0)),\n })\n }\n\n fn serialize_tuple(self, len: usize) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n len: usize,\n ) -> Result {\n Ok(VariantSeqSerializer {\n 
variant_name: variant.to_owned(),\n vec: Vec::with_capacity(len),\n })\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n Ok(MapSerializer {\n map: yaml_rust2::yaml::Hash::new(),\n next_key: None,\n })\n }\n\n fn serialize_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_map(Some(len))\n }\n\n fn serialize_struct_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n Ok(VariantMapSerializer {\n variant_name: variant.to_owned(),\n map: yaml_rust2::yaml::Hash::new(),\n })\n }\n}\n\npub struct SeqSerializer {\n vec: Vec,\n}\n\nimpl ser::SerializeSeq for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Array(self.vec))\n }\n}\n\nimpl ser::SerializeTuple for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\nimpl ser::SerializeTupleStruct for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\npub struct MapSerializer {\n map: yaml_rust2::yaml::Hash,\n next_key: Option,\n}\n\nimpl ser::SerializeMap for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.next_key = Some(key.serialize(YamlSerializer)?);\n 
Ok(())\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n let key = self.next_key.take().unwrap();\n self.map.insert(key, value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Hash(self.map))\n }\n}\n\nimpl ser::SerializeStruct for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeMap::serialize_entry(self, key, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeMap::end(self)\n }\n}\n\npub struct VariantMapSerializer {\n variant_name: String,\n map: yaml_rust2::yaml::Hash,\n}\n\nimpl ser::SerializeStructVariant for VariantMapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.map.insert(\n Yaml::String(key.to_owned()),\n value.serialize(YamlSerializer)?,\n );\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut outer_map = yaml_rust2::yaml::Hash::new();\n outer_map.insert(Yaml::String(self.variant_name), Yaml::Hash(self.map));\n Ok(Yaml::Hash(outer_map))\n }\n}\n\npub struct VariantSeqSerializer {\n variant_name: String,\n vec: Vec,\n}\n\nimpl ser::SerializeTupleVariant for VariantSeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut map = yaml_rust2::yaml::Hash::new();\n map.insert(Yaml::String(self.variant_name), Yaml::Array(self.vec));\n Ok(Yaml::Hash(map))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use serde::ser::Error as SerdeSerError;\n use serde::{Serialize, Serializer};\n use std::collections::BTreeMap;\n use 
yaml_rust2::yaml::{Hash, Yaml};\n\n fn assert_yaml_serialization(value: T, expected_yaml: Yaml) {\n let result = YamlSerializer::serialize(&value);\n println!(\"Serialized value: {result:?}, Expected value: {expected_yaml:?}\");\n\n assert!(\n result.is_ok(),\n \"Serialization failed when it should have succeeded. Error: {:?}\",\n result.err()\n );\n assert_eq!(\n result.unwrap(),\n expected_yaml,\n \"Serialized YAML did not match expected YAML.\"\n );\n }\n\n #[test]\n fn test_serialize_bool() {\n assert_yaml_serialization(true, Yaml::Boolean(true));\n assert_yaml_serialization(false, Yaml::Boolean(false));\n }\n\n #[test]\n fn test_serialize_integers() {\n assert_yaml_serialization(42i8, Yaml::Integer(42));\n assert_yaml_serialization(-100i16, Yaml::Integer(-100));\n assert_yaml_serialization(123456i32, Yaml::Integer(123456));\n assert_yaml_serialization(7890123456789i64, Yaml::Integer(7890123456789));\n assert_yaml_serialization(255u8, Yaml::Integer(255));\n assert_yaml_serialization(65535u16, Yaml::Integer(65535));\n assert_yaml_serialization(4000000000u32, Yaml::Integer(4000000000));\n // u64 is serialized as Yaml::Real(String) in your implementation\n assert_yaml_serialization(\n 18446744073709551615u64,\n Yaml::Real(\"18446744073709551615\".to_string()),\n );\n }\n\n #[test]\n fn test_serialize_floats() {\n assert_yaml_serialization(3.14f32, Yaml::Real(\"3.14\".to_string()));\n assert_yaml_serialization(-0.001f64, Yaml::Real(\"-0.001\".to_string()));\n assert_yaml_serialization(1.0e10f64, Yaml::Real(\"10000000000\".to_string()));\n }\n\n #[test]\n fn test_serialize_char() {\n assert_yaml_serialization('X', Yaml::String(\"X\".to_string()));\n assert_yaml_serialization('✨', Yaml::String(\"✨\".to_string()));\n }\n\n #[test]\n fn test_serialize_str_and_string() {\n assert_yaml_serialization(\"hello YAML\", Yaml::String(\"hello YAML\".to_string()));\n assert_yaml_serialization(\"\".to_string(), Yaml::String(\"\".to_string()));\n }\n\n #[test]\n fn 
test_serialize_raw_bytes() {\n let bytes_slice: &[u8] = &[0x48, 0x65, 0x6c, 0x6c, 0x6f]; // \"Hello\"\n let expected = Yaml::Array(vec![\n Yaml::Integer(72),\n Yaml::Integer(101),\n Yaml::Integer(108),\n Yaml::Integer(108),\n Yaml::Integer(111),\n ]);\n assert_yaml_serialization(bytes_slice, expected.clone());\n\n let bytes_vec: Vec = bytes_slice.to_vec();\n assert_yaml_serialization(bytes_vec, expected);\n\n let empty_bytes_slice: &[u8] = &[];\n assert_yaml_serialization(empty_bytes_slice, Yaml::Array(vec![]));\n }\n\n struct MyBytesWrapper<'a>(&'a [u8]);\n\n impl<'a> Serialize for MyBytesWrapper<'a> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: Serializer,\n {\n serializer.serialize_bytes(self.0)\n }\n }\n\n #[test]\n fn test_custom_wrapper_serializes_bytes_as_base64_string() {\n let data: &[u8] = &[72, 101, 108, 108, 111]; // \"Hello\"\n let wrapped_data = MyBytesWrapper(data);\n\n let base64_encoded = BASE64_STANDARD.encode(data);\n let expected_yaml = Yaml::String(base64_encoded);\n\n assert_yaml_serialization(wrapped_data, expected_yaml);\n\n let empty_data: &[u8] = &[];\n let wrapped_empty_data = MyBytesWrapper(empty_data);\n let empty_base64_encoded = BASE64_STANDARD.encode(empty_data);\n let expected_empty_yaml = Yaml::String(empty_base64_encoded);\n assert_yaml_serialization(wrapped_empty_data, expected_empty_yaml);\n }\n\n #[test]\n fn test_serialize_option() {\n let val_none: Option = None;\n assert_yaml_serialization(val_none, Yaml::Null);\n\n let val_some: Option = Some(\"has value\".to_string());\n assert_yaml_serialization(val_some, Yaml::String(\"has value\".to_string()));\n }\n\n #[test]\n fn test_serialize_unit() {\n assert_yaml_serialization((), Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_unit_struct() {\n #[derive(Serialize)]\n struct MyUnitStruct;\n\n assert_yaml_serialization(MyUnitStruct, Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_newtype_struct() {\n #[derive(Serialize)]\n struct 
MyNewtypeStruct(u64);\n\n assert_yaml_serialization(MyNewtypeStruct(12345u64), Yaml::Real(\"12345\".to_string()));\n }\n\n #[test]\n fn test_serialize_seq() {\n let empty_vec: Vec = vec![];\n assert_yaml_serialization(empty_vec, Yaml::Array(vec![]));\n\n let simple_vec = vec![10, 20, 30];\n assert_yaml_serialization(\n simple_vec,\n Yaml::Array(vec![\n Yaml::Integer(10),\n Yaml::Integer(20),\n Yaml::Integer(30),\n ]),\n );\n\n let string_vec = vec![\"a\".to_string(), \"b\".to_string()];\n assert_yaml_serialization(\n string_vec,\n Yaml::Array(vec![\n Yaml::String(\"a\".to_string()),\n Yaml::String(\"b\".to_string()),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple() {\n let tuple_val = (42i32, \"text\", false);\n assert_yaml_serialization(\n tuple_val,\n Yaml::Array(vec![\n Yaml::Integer(42),\n Yaml::String(\"text\".to_string()),\n Yaml::Boolean(false),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple_struct() {\n #[derive(Serialize)]\n struct MyTupleStruct(String, i64);\n\n assert_yaml_serialization(\n MyTupleStruct(\"value\".to_string(), -500),\n Yaml::Array(vec![Yaml::String(\"value\".to_string()), Yaml::Integer(-500)]),\n );\n }\n\n #[test]\n fn test_serialize_map() {\n let mut map = BTreeMap::new(); // BTreeMap for ordered keys, matching yaml::Hash\n map.insert(\"key1\".to_string(), 100);\n map.insert(\"key2\".to_string(), 200);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"key1\".to_string()), Yaml::Integer(100));\n expected_hash.insert(Yaml::String(\"key2\".to_string()), Yaml::Integer(200));\n assert_yaml_serialization(map, Yaml::Hash(expected_hash));\n\n let empty_map: BTreeMap = BTreeMap::new();\n assert_yaml_serialization(empty_map, Yaml::Hash(Hash::new()));\n }\n\n #[derive(Serialize)]\n struct SimpleStruct {\n id: u32,\n name: String,\n is_active: bool,\n }\n\n #[test]\n fn test_serialize_struct() {\n let s = SimpleStruct {\n id: 101,\n name: \"A Struct\".to_string(),\n is_active: true,\n };\n let mut 
expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(101));\n expected_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"A Struct\".to_string()),\n );\n expected_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(true));\n assert_yaml_serialization(s, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n struct NestedStruct {\n description: String,\n data: SimpleStruct,\n tags: Vec,\n }\n\n #[test]\n fn test_serialize_nested_struct() {\n let ns = NestedStruct {\n description: \"Contains another struct and a vec\".to_string(),\n data: SimpleStruct {\n id: 202,\n name: \"Inner\".to_string(),\n is_active: false,\n },\n tags: vec![\"nested\".to_string(), \"complex\".to_string()],\n };\n\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(202));\n inner_struct_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"Inner\".to_string()),\n );\n inner_struct_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(false));\n\n let tags_array = Yaml::Array(vec![\n Yaml::String(\"nested\".to_string()),\n Yaml::String(\"complex\".to_string()),\n ]);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"description\".to_string()),\n Yaml::String(\"Contains another struct and a vec\".to_string()),\n );\n expected_hash.insert(\n Yaml::String(\"data\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n expected_hash.insert(Yaml::String(\"tags\".to_string()), tags_array);\n\n assert_yaml_serialization(ns, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n enum MyEnum {\n Unit,\n Newtype(i32),\n Tuple(String, bool),\n Struct { field_a: u16, field_b: char },\n }\n\n #[test]\n fn test_serialize_enum_unit_variant() {\n assert_yaml_serialization(MyEnum::Unit, Yaml::String(\"Unit\".to_string()));\n }\n\n #[test]\n fn test_serialize_enum_newtype_variant() {\n let mut 
expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"Newtype\".to_string()), Yaml::Integer(999));\n assert_yaml_serialization(MyEnum::Newtype(999), Yaml::Hash(expected_hash));\n }\n\n #[test]\n fn test_serialize_enum_tuple_variant() {\n let mut expected_hash = Hash::new();\n let inner_array = Yaml::Array(vec![\n Yaml::String(\"tuple_data\".to_string()),\n Yaml::Boolean(true),\n ]);\n expected_hash.insert(Yaml::String(\"Tuple\".to_string()), inner_array);\n assert_yaml_serialization(\n MyEnum::Tuple(\"tuple_data\".to_string(), true),\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_serialize_enum_struct_variant() {\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"field_a\".to_string()), Yaml::Integer(123));\n inner_struct_hash.insert(\n Yaml::String(\"field_b\".to_string()),\n Yaml::String(\"Z\".to_string()),\n );\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"Struct\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n assert_yaml_serialization(\n MyEnum::Struct {\n field_a: 123,\n field_b: 'Z',\n },\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_display() {\n let error = YamlSerializerError {\n msg: \"A test error message\".to_string(),\n };\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: A test error message\"\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_custom() {\n let error = YamlSerializerError::custom(\"Custom error detail\");\n assert_eq!(error.msg, \"Custom error detail\");\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: Custom error detail\"\n );\n let _err_trait_obj: Box = Box::new(error);\n }\n}\n"], ["/cocoindex/src/llm/voyage.rs", "use crate::prelude::*;\n\nuse crate::llm::{LlmEmbeddingClient, LlmEmbeddingRequest, LlmEmbeddingResponse};\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n // Current models\n \"voyage-3-large\" => 1024,\n \"voyage-3.5\" => 1024,\n \"voyage-3.5-lite\" => 1024,\n \"voyage-code-3\" => 1024,\n \"voyage-finance-2\" => 1024,\n \"voyage-law-2\" => 1024,\n \"voyage-code-2\" => 1536,\n\n // Legacy models\n \"voyage-3\" => 1024,\n \"voyage-3-lite\" => 512,\n \"voyage-multilingual-2\" => 1024,\n \"voyage-large-2-instruct\" => 1024,\n \"voyage-large-2\" => 1536,\n \"voyage-2\" => 1024,\n \"voyage-lite-02-instruct\" => 1024,\n \"voyage-02\" => 1024,\n \"voyage-01\" => 1024,\n \"voyage-lite-01\" => 1024,\n \"voyage-lite-01-instruct\" => 1024,\n};\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Voyage AI doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"VOYAGE_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"VOYAGE_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[derive(Deserialize)]\nstruct EmbeddingData {\n embedding: Vec,\n}\n\n#[derive(Deserialize)]\nstruct EmbedResponse {\n data: Vec,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = \"https://api.voyageai.com/v1/embeddings\";\n\n let mut payload = serde_json::json!({\n \"input\": request.text,\n \"model\": request.model,\n });\n\n if let Some(task_type) = request.task_type {\n payload[\"input_type\"] = serde_json::Value::String(task_type.into());\n }\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"Authorization\", format!(\"Bearer {}\", self.api_key))\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n\n if !resp.status().is_success() {\n bail!(\n \"Voyage AI API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n\n let embedding_resp: EmbedResponse 
= resp.json().await.context(\"Invalid JSON\")?;\n\n if embedding_resp.data.is_empty() {\n bail!(\"No embedding data in response\");\n }\n\n Ok(LlmEmbeddingResponse {\n embedding: embedding_resp.data[0].embedding.clone(),\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/base/duration.rs", "use std::f64;\n\nuse anyhow::{Result, anyhow, bail};\nuse chrono::Duration;\n\n/// Parses a string of number-unit pairs into a vector of (number, unit),\n/// ensuring units are among the allowed ones.\nfn parse_components(\n s: &str,\n allowed_units: &[char],\n original_input: &str,\n) -> Result> {\n let mut result = Vec::new();\n let mut iter = s.chars().peekable();\n while iter.peek().is_some() {\n let mut num_str = String::new();\n let mut has_decimal = false;\n\n // Parse digits and optional decimal point\n while let Some(&c) = iter.peek() {\n if c.is_ascii_digit() || (c == '.' && !has_decimal) {\n if c == '.' 
{\n has_decimal = true;\n }\n num_str.push(iter.next().unwrap());\n } else {\n break;\n }\n }\n if num_str.is_empty() {\n bail!(\"Expected number in: {}\", original_input);\n }\n let num = num_str\n .parse::()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", num_str, original_input))?;\n if let Some(&unit) = iter.peek() {\n if allowed_units.contains(&unit) {\n result.push((num, unit));\n iter.next();\n } else {\n bail!(\"Invalid unit '{}' in: {}\", unit, original_input);\n }\n } else {\n bail!(\n \"Missing unit after number '{}' in: {}\",\n num_str,\n original_input\n );\n }\n }\n Ok(result)\n}\n\n/// Parses an ISO 8601 duration string into a `chrono::Duration`.\nfn parse_iso8601_duration(s: &str, original_input: &str) -> Result {\n let (is_negative, s_after_sign) = if let Some(stripped) = s.strip_prefix('-') {\n (true, stripped)\n } else {\n (false, s)\n };\n\n if !s_after_sign.starts_with('P') {\n bail!(\"Duration must start with 'P' in: {}\", original_input);\n }\n let s_after_p = &s_after_sign[1..];\n\n let (date_part, time_part) = if let Some(pos) = s_after_p.find('T') {\n (&s_after_p[..pos], Some(&s_after_p[pos + 1..]))\n } else {\n (s_after_p, None)\n };\n\n // Date components (Y, M, W, D)\n let date_components = parse_components(date_part, &['Y', 'M', 'W', 'D'], original_input)?;\n\n // Time components (H, M, S)\n let time_components = if let Some(time_str) = time_part {\n let comps = parse_components(time_str, &['H', 'M', 'S'], original_input)?;\n if comps.is_empty() {\n bail!(\n \"Time part present but no time components in: {}\",\n original_input\n );\n }\n comps\n } else {\n vec![]\n };\n\n if date_components.is_empty() && time_components.is_empty() {\n bail!(\"No components in duration: {}\", original_input);\n }\n\n // Accumulate date duration\n let date_duration = date_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| {\n let days = match unit {\n 'Y' => num * 365.0,\n 'M' => num * 30.0,\n 'W' => num * 7.0,\n 'D' => num,\n _ => 
unreachable!(\"Invalid date unit should be caught by prior validation\"),\n };\n let microseconds = (days * 86_400_000_000.0) as i64;\n acc + Duration::microseconds(microseconds)\n });\n\n // Accumulate time duration\n let time_duration =\n time_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| match unit {\n 'H' => {\n let nanoseconds = (num * 3_600_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'M' => {\n let nanoseconds = (num * 60_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'S' => {\n let nanoseconds = (num.fract() * 1_000_000_000.0).round() as i64;\n acc + Duration::seconds(num as i64) + Duration::nanoseconds(nanoseconds)\n }\n _ => unreachable!(\"Invalid time unit should be caught by prior validation\"),\n });\n\n let mut total = date_duration + time_duration;\n if is_negative {\n total = -total;\n }\n\n Ok(total)\n}\n\n/// Parses a human-readable duration string into a `chrono::Duration`.\nfn parse_human_readable_duration(s: &str, original_input: &str) -> Result {\n let parts: Vec<&str> = s.split_whitespace().collect();\n if parts.is_empty() || parts.len() % 2 != 0 {\n bail!(\n \"Invalid human-readable duration format in: {}\",\n original_input\n );\n }\n\n let durations: Result> = parts\n .chunks(2)\n .map(|chunk| {\n let num: i64 = chunk[0]\n .parse()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", chunk[0], original_input))?;\n\n match chunk[1].to_lowercase().as_str() {\n \"day\" | \"days\" => Ok(Duration::days(num)),\n \"hour\" | \"hours\" => Ok(Duration::hours(num)),\n \"minute\" | \"minutes\" => Ok(Duration::minutes(num)),\n \"second\" | \"seconds\" => Ok(Duration::seconds(num)),\n \"millisecond\" | \"milliseconds\" => Ok(Duration::milliseconds(num)),\n \"microsecond\" | \"microseconds\" => Ok(Duration::microseconds(num)),\n _ => bail!(\"Invalid unit '{}' in: {}\", chunk[1], original_input),\n }\n })\n .collect();\n\n durations.map(|durs| 
durs.into_iter().sum())\n}\n\n/// Parses a duration string into a `chrono::Duration`, trying ISO 8601 first, then human-readable format.\npub fn parse_duration(s: &str) -> Result {\n let original_input = s;\n let s = s.trim();\n if s.is_empty() {\n bail!(\"Empty duration string\");\n }\n\n let is_likely_iso8601 = match s.as_bytes() {\n [c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n [b'-', c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n _ => false,\n };\n\n if is_likely_iso8601 {\n parse_iso8601_duration(s, original_input)\n } else {\n parse_human_readable_duration(s, original_input)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn check_ok(res: Result, expected: Duration, input_str: &str) {\n match res {\n Ok(duration) => assert_eq!(duration, expected, \"Input: '{input_str}'\"),\n Err(e) => panic!(\"Input: '{input_str}', expected Ok({expected:?}), but got Err: {e}\"),\n }\n }\n\n fn check_err_contains(res: Result, expected_substring: &str, input_str: &str) {\n match res {\n Ok(d) => panic!(\n \"Input: '{input_str}', expected error containing '{expected_substring}', but got Ok({d:?})\"\n ),\n Err(e) => {\n let err_msg = e.to_string();\n assert!(\n err_msg.contains(expected_substring),\n \"Input: '{input_str}', error message '{err_msg}' does not contain expected substring '{expected_substring}'\"\n );\n }\n }\n }\n\n #[test]\n fn test_empty_string() {\n check_err_contains(parse_duration(\"\"), \"Empty duration string\", \"\\\"\\\"\");\n }\n\n #[test]\n fn test_whitespace_string() {\n check_err_contains(parse_duration(\" \"), \"Empty duration string\", \"\\\" \\\"\");\n }\n\n #[test]\n fn test_iso_just_p() {\n check_err_contains(parse_duration(\"P\"), \"No components in duration: P\", \"\\\"P\\\"\");\n }\n\n #[test]\n fn test_iso_pt() {\n check_err_contains(\n parse_duration(\"PT\"),\n \"Time part present but no time components in: PT\",\n \"\\\"PT\\\"\",\n );\n }\n\n #[test]\n fn test_iso_missing_number_before_unit_in_date_part() {\n 
check_err_contains(parse_duration(\"PD\"), \"Expected number in: PD\", \"\\\"PD\\\"\");\n }\n #[test]\n fn test_iso_missing_number_before_unit_in_time_part() {\n check_err_contains(parse_duration(\"PTM\"), \"Expected number in: PTM\", \"\\\"PTM\\\"\");\n }\n\n #[test]\n fn test_iso_time_unit_without_t() {\n check_err_contains(parse_duration(\"P1H\"), \"Invalid unit 'H' in: P1H\", \"\\\"P1H\\\"\");\n check_err_contains(parse_duration(\"P1S\"), \"Invalid unit 'S' in: P1S\", \"\\\"P1S\\\"\");\n }\n\n #[test]\n fn test_iso_invalid_unit() {\n check_err_contains(parse_duration(\"P1X\"), \"Invalid unit 'X' in: P1X\", \"\\\"P1X\\\"\");\n check_err_contains(\n parse_duration(\"PT1X\"),\n \"Invalid unit 'X' in: PT1X\",\n \"\\\"PT1X\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_lowercase_unit_is_not_allowed() {\n check_err_contains(\n parse_duration(\"p1h\"),\n \"Duration must start with 'P' in: p1h\",\n \"\\\"p1h\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1h\"),\n \"Invalid unit 'h' in: PT1h\",\n \"\\\"PT1h\\\"\",\n );\n }\n\n #[test]\n fn test_iso_trailing_number_error() {\n check_err_contains(\n parse_duration(\"P1D2\"),\n \"Missing unit after number '2' in: P1D2\",\n \"\\\"P1D2\\\"\",\n );\n }\n\n #[test]\n fn test_iso_invalid_fractional_format() {\n check_err_contains(\n parse_duration(\"PT1..5S\"),\n \"Invalid unit '.' in: PT1..5S\",\n \"\\\"PT1..5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1.5.5S\"),\n \"Invalid unit '.' in: PT1.5.5S\",\n \"\\\"PT1.5.5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1..5D\"),\n \"Invalid unit '.' 
in: P1..5D\",\n \"\\\"P1..5D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_misplaced_t() {\n check_err_contains(\n parse_duration(\"P1DT2H T3M\"),\n \"Expected number in: P1DT2H T3M\",\n \"\\\"P1DT2H T3M\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1T2H\"),\n \"Missing unit after number '1' in: P1T2H\",\n \"\\\"P1T2H\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_number_after_p() {\n check_err_contains(\n parse_duration(\"P-1D\"),\n \"Expected number in: P-1D\",\n \"\\\"P-1D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_months() {\n check_ok(parse_duration(\"P1M\"), Duration::days(30), \"\\\"P1M\\\"\");\n check_ok(parse_duration(\" P13M\"), Duration::days(13 * 30), \"\\\"P13M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_weeks() {\n check_ok(parse_duration(\"P1W\"), Duration::days(7), \"\\\"P1W\\\"\");\n check_ok(parse_duration(\" P1W \"), Duration::days(7), \"\\\"P1W\\\"\");\n }\n\n #[test]\n fn test_iso_valid_days() {\n check_ok(parse_duration(\"P1D\"), Duration::days(1), \"\\\"P1D\\\"\");\n }\n\n #[test]\n fn test_iso_valid_hours() {\n check_ok(parse_duration(\"PT2H\"), Duration::hours(2), \"\\\"PT2H\\\"\");\n }\n\n #[test]\n fn test_iso_valid_minutes() {\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT3M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_seconds() {\n check_ok(parse_duration(\"PT4S\"), Duration::seconds(4), \"\\\"PT4S\\\"\");\n }\n\n #[test]\n fn test_iso_combined_units() {\n check_ok(\n parse_duration(\"P1Y2M3W4DT5H6M7S\"),\n Duration::days(365 + 60 + 3 * 7 + 4)\n + Duration::hours(5)\n + Duration::minutes(6)\n + Duration::seconds(7),\n \"\\\"P1Y2M3DT4H5M6S\\\"\",\n );\n check_ok(\n parse_duration(\"P1DT2H3M4S\"),\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4),\n \"\\\"P1DT2H3M4S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_duplicated_unit() {\n check_ok(parse_duration(\"P1D1D\"), Duration::days(2), \"\\\"P1D1D\\\"\");\n check_ok(parse_duration(\"PT1H1H\"), Duration::hours(2), 
\"\\\"PT1H1H\\\"\");\n }\n\n #[test]\n fn test_iso_out_of_order_unit() {\n check_ok(\n parse_duration(\"P1W1Y\"),\n Duration::days(365 + 7),\n \"\\\"P1W1Y\\\"\",\n );\n check_ok(\n parse_duration(\"PT2S1H\"),\n Duration::hours(1) + Duration::seconds(2),\n \"\\\"PT2S1H\\\"\",\n );\n check_ok(parse_duration(\"P3M\"), Duration::days(90), \"\\\"PT2S1H\\\"\");\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT2S1H\\\"\");\n check_err_contains(\n parse_duration(\"P1H2D\"),\n \"Invalid unit 'H' in: P1H2D\", // Time part without 'T' is invalid\n \"\\\"P1H2D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_duration_p1d() {\n check_ok(parse_duration(\"-P1D\"), -Duration::days(1), \"\\\"-P1D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pd0() {\n check_ok(parse_duration(\"P0D\"), Duration::zero(), \"\\\"P0D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0s() {\n check_ok(parse_duration(\"PT0S\"), Duration::zero(), \"\\\"PT0S\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0h0m0s() {\n check_ok(parse_duration(\"PT0H0M0S\"), Duration::zero(), \"\\\"PT0H0M0S\\\"\");\n }\n\n #[test]\n fn test_iso_fractional_seconds() {\n check_ok(\n parse_duration(\"PT1.5S\"),\n Duration::seconds(1) + Duration::milliseconds(500),\n \"\\\"PT1.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT441010.456123S\"),\n Duration::seconds(441010) + Duration::microseconds(456123),\n \"\\\"PT441010.456123S\\\"\",\n );\n check_ok(\n parse_duration(\"PT0.000001S\"),\n Duration::microseconds(1),\n \"\\\"PT0.000001S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_fractional_date_units() {\n check_ok(\n parse_duration(\"P1.5D\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"P1.25Y\"),\n Duration::microseconds((1.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"P1.25Y\\\"\",\n );\n check_ok(\n parse_duration(\"P2.75M\"),\n Duration::microseconds((2.75 * 30.0 * 86_400_000_000.0) as i64),\n \"\\\"P2.75M\\\"\",\n );\n 
check_ok(\n parse_duration(\"P0.5W\"),\n Duration::microseconds((0.5 * 7.0 * 86_400_000_000.0) as i64),\n \"\\\"P0.5W\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_fractional_date_units() {\n check_ok(\n parse_duration(\"-P1.5D\"),\n -Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"-P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"-P0.25Y\"),\n -Duration::microseconds((0.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"-P0.25Y\\\"\",\n );\n }\n\n #[test]\n fn test_iso_combined_fractional_units() {\n check_ok(\n parse_duration(\"P1.5DT2.5H3.5M4.5S\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64)\n + Duration::microseconds((2.5 * 3_600_000_000.0) as i64)\n + Duration::microseconds((3.5 * 60_000_000.0) as i64)\n + Duration::seconds(4)\n + Duration::milliseconds(500),\n \"\\\"1.5DT2.5H3.5M4.5S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_multiple_fractional_time_units() {\n check_ok(\n parse_duration(\"PT1.5S2.5S\"),\n Duration::seconds(1 + 2) + Duration::milliseconds(500) + Duration::milliseconds(500),\n \"\\\"PT1.5S2.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT1.1H2.2M3.3S\"),\n Duration::hours(1)\n + Duration::seconds((0.1 * 3600.0) as i64)\n + Duration::minutes(2)\n + Duration::seconds((0.2 * 60.0) as i64)\n + Duration::seconds(3)\n + Duration::milliseconds(300),\n \"\\\"PT1.1H2.2M3.3S\\\"\",\n );\n }\n\n // Human-readable Tests\n #[test]\n fn test_human_missing_unit() {\n check_err_contains(\n parse_duration(\"1\"),\n \"Invalid human-readable duration format in: 1\",\n \"\\\"1\\\"\",\n );\n }\n\n #[test]\n fn test_human_missing_number() {\n check_err_contains(\n parse_duration(\"day\"),\n \"Invalid human-readable duration format in: day\",\n \"\\\"day\\\"\",\n );\n }\n\n #[test]\n fn test_human_incomplete_pair() {\n check_err_contains(\n parse_duration(\"1 day 2\"),\n \"Invalid human-readable duration format in: 1 day 2\",\n \"\\\"1 day 2\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_number_at_start() {\n 
check_err_contains(\n parse_duration(\"one day\"),\n \"Invalid number 'one' in: one day\",\n \"\\\"one day\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_unit() {\n check_err_contains(\n parse_duration(\"1 hour 2 minutes 3 seconds four seconds\"),\n \"Invalid number 'four' in: 1 hour 2 minutes 3 seconds four seconds\",\n \"\\\"1 hour 2 minutes 3 seconds four seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_float_number_fail() {\n check_err_contains(\n parse_duration(\"1.5 hours\"),\n \"Invalid number '1.5' in: 1.5 hours\",\n \"\\\"1.5 hours\\\"\",\n );\n }\n\n #[test]\n fn test_invalid_human_readable_no_pairs() {\n check_err_contains(\n parse_duration(\"just some words\"),\n \"Invalid human-readable duration format in: just some words\",\n \"\\\"just some words\\\"\",\n );\n }\n\n #[test]\n fn test_human_unknown_unit() {\n check_err_contains(\n parse_duration(\"1 year\"),\n \"Invalid unit 'year' in: 1 year\",\n \"\\\"1 year\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_day() {\n check_ok(parse_duration(\"1 day\"), Duration::days(1), \"\\\"1 day\\\"\");\n }\n\n #[test]\n fn test_human_valid_days_uppercase() {\n check_ok(parse_duration(\"2 DAYS\"), Duration::days(2), \"\\\"2 DAYS\\\"\");\n }\n\n #[test]\n fn test_human_valid_hour() {\n check_ok(parse_duration(\"3 hour\"), Duration::hours(3), \"\\\"3 hour\\\"\");\n }\n\n #[test]\n fn test_human_valid_hours_mixedcase() {\n check_ok(parse_duration(\"4 HoUrS\"), Duration::hours(4), \"\\\"4 HoUrS\\\"\");\n }\n\n #[test]\n fn test_human_valid_minute() {\n check_ok(\n parse_duration(\"5 minute\"),\n Duration::minutes(5),\n \"\\\"5 minute\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_minutes() {\n check_ok(\n parse_duration(\"6 minutes\"),\n Duration::minutes(6),\n \"\\\"6 minutes\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_second() {\n check_ok(\n parse_duration(\"7 second\"),\n Duration::seconds(7),\n \"\\\"7 second\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_seconds() {\n check_ok(\n 
parse_duration(\"8 seconds\"),\n Duration::seconds(8),\n \"\\\"8 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_millisecond() {\n check_ok(\n parse_duration(\"9 millisecond\"),\n Duration::milliseconds(9),\n \"\\\"9 millisecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_milliseconds() {\n check_ok(\n parse_duration(\"10 milliseconds\"),\n Duration::milliseconds(10),\n \"\\\"10 milliseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microsecond() {\n check_ok(\n parse_duration(\"11 microsecond\"),\n Duration::microseconds(11),\n \"\\\"11 microsecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microseconds() {\n check_ok(\n parse_duration(\"12 microseconds\"),\n Duration::microseconds(12),\n \"\\\"12 microseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_combined() {\n let expected =\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4);\n check_ok(\n parse_duration(\"1 day 2 hours 3 minutes 4 seconds\"),\n expected,\n \"\\\"1 day 2 hours 3 minutes 4 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_out_of_order() {\n check_ok(\n parse_duration(\"1 second 2 hours\"),\n Duration::hours(2) + Duration::seconds(1),\n \"\\\"1 second 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"7 minutes 6 hours 5 days\"),\n Duration::days(5) + Duration::hours(6) + Duration::minutes(7),\n \"\\\"7 minutes 6 hours 5 days\\\"\",\n )\n }\n\n #[test]\n fn test_human_zero_duration_seconds() {\n check_ok(\n parse_duration(\"0 seconds\"),\n Duration::zero(),\n \"\\\"0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_days_hours() {\n check_ok(\n parse_duration(\"0 day 0 hour\"),\n Duration::zero(),\n \"\\\"0 day 0 hour\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_multiple_zeros() {\n check_ok(\n parse_duration(\"0 days 0 hours 0 minutes 0 seconds\"),\n Duration::zero(),\n \"\\\"0 days 0 hours 0 minutes 0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_no_space_between_num_unit() {\n 
check_err_contains(\n parse_duration(\"1day\"),\n \"Invalid human-readable duration format in: 1day\",\n \"\\\"1day\\\"\",\n );\n }\n\n #[test]\n fn test_human_trimmed() {\n check_ok(parse_duration(\" 1 day \"), Duration::days(1), \"\\\" 1 day \\\"\");\n }\n\n #[test]\n fn test_human_extra_whitespace() {\n check_ok(\n parse_duration(\" 1 day 2 hours \"),\n Duration::days(1) + Duration::hours(2),\n \"\\\" 1 day 2 hours \\\"\",\n );\n }\n\n #[test]\n fn test_human_negative_numbers() {\n check_ok(\n parse_duration(\"-1 day 2 hours\"),\n Duration::days(-1) + Duration::hours(2),\n \"\\\"-1 day 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"1 day -2 hours\"),\n Duration::days(1) + Duration::hours(-2),\n \"\\\"1 day -2 hours\\\"\",\n );\n }\n}\n"], ["/cocoindex/src/ops/sdk.rs", "pub(crate) use crate::prelude::*;\n\nuse crate::builder::plan::AnalyzedFieldReference;\nuse crate::builder::plan::AnalyzedLocalFieldReference;\nuse std::collections::BTreeMap;\n\npub use super::factory_bases::*;\npub use super::interface::*;\npub use crate::base::schema::*;\npub use crate::base::spec::*;\npub use crate::base::value::*;\n\n// Disambiguate the ExportTargetBuildOutput type.\npub use super::factory_bases::TypedExportDataCollectionBuildOutput;\npub use super::registry::ExecutorFactoryRegistry;\n/// Defined for all types convertible to ValueType, to ease creation for ValueType in various operation factories.\npub trait TypeCore {\n fn into_type(self) -> ValueType;\n}\n\nimpl TypeCore for BasicValueType {\n fn into_type(self) -> ValueType {\n ValueType::Basic(self)\n }\n}\n\nimpl TypeCore for StructSchema {\n fn into_type(self) -> ValueType {\n ValueType::Struct(self)\n }\n}\n\nimpl TypeCore for TableSchema {\n fn into_type(self) -> ValueType {\n ValueType::Table(self)\n }\n}\n\npub fn make_output_type(value_type: Type) -> EnrichedValueType {\n EnrichedValueType {\n typ: value_type.into_type(),\n attrs: Default::default(),\n nullable: false,\n }\n}\n\n#[derive(Debug, 
Deserialize)]\npub struct EmptySpec {}\n\n#[macro_export]\nmacro_rules! fields_value {\n ($($field:expr), +) => {\n $crate::base::value::FieldValues { fields: std::vec![ $(($field).into()),+ ] }\n };\n}\n\npub struct SchemaBuilderFieldRef(AnalyzedLocalFieldReference);\n\nimpl SchemaBuilderFieldRef {\n pub fn to_field_ref(&self) -> AnalyzedFieldReference {\n AnalyzedFieldReference {\n local: self.0.clone(),\n scope_up_level: 0,\n }\n }\n}\npub struct StructSchemaBuilder<'a> {\n base_fields_idx: Vec,\n target: &'a mut StructSchema,\n}\n\nimpl<'a> StructSchemaBuilder<'a> {\n pub fn new(target: &'a mut StructSchema) -> Self {\n Self {\n base_fields_idx: Vec::new(),\n target,\n }\n }\n\n pub fn _set_description(&mut self, description: impl Into>) {\n self.target.description = Some(description.into());\n }\n\n pub fn add_field(&mut self, field_schema: FieldSchema) -> SchemaBuilderFieldRef {\n let current_idx = self.target.fields.len() as u32;\n Arc::make_mut(&mut self.target.fields).push(field_schema);\n let mut fields_idx = self.base_fields_idx.clone();\n fields_idx.push(current_idx);\n SchemaBuilderFieldRef(AnalyzedLocalFieldReference { fields_idx })\n }\n\n pub fn _add_struct_field(\n &mut self,\n name: impl Into,\n nullable: bool,\n attrs: Arc>,\n ) -> (StructSchemaBuilder<'_>, SchemaBuilderFieldRef) {\n let field_schema = FieldSchema::new(\n name.into(),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(Vec::new()),\n description: None,\n }),\n nullable,\n attrs,\n },\n );\n let local_ref = self.add_field(field_schema);\n let struct_schema = match &mut Arc::make_mut(&mut self.target.fields)\n .last_mut()\n .unwrap()\n .value_type\n .typ\n {\n ValueType::Struct(s) => s,\n _ => unreachable!(),\n };\n (\n StructSchemaBuilder {\n base_fields_idx: local_ref.0.fields_idx.clone(),\n target: struct_schema,\n },\n local_ref,\n )\n }\n}\n"], ["/cocoindex/src/utils/concur_control.rs", "use crate::prelude::*;\n\nuse tokio::sync::{AcquireError, 
OwnedSemaphorePermit, Semaphore};\n\nstruct WeightedSemaphore {\n downscale_factor: u8,\n downscaled_quota: u32,\n sem: Arc,\n}\n\nimpl WeightedSemaphore {\n pub fn new(quota: usize) -> Self {\n let mut downscale_factor = 0;\n let mut downscaled_quota = quota;\n while downscaled_quota > u32::MAX as usize {\n downscaled_quota >>= 1;\n downscale_factor += 1;\n }\n let sem = Arc::new(Semaphore::new(downscaled_quota));\n Self {\n downscaled_quota: downscaled_quota as u32,\n downscale_factor,\n sem,\n }\n }\n\n async fn acquire_reservation(&self) -> Result {\n self.sem.clone().acquire_owned().await\n }\n\n async fn acquire(\n &self,\n weight: usize,\n reserved: bool,\n ) -> Result, AcquireError> {\n let downscaled_weight = (weight >> self.downscale_factor) as u32;\n let capped_weight = downscaled_weight.min(self.downscaled_quota);\n let reserved_weight = if reserved { 1 } else { 0 };\n if reserved_weight >= capped_weight {\n return Ok(None);\n }\n Ok(Some(\n self.sem\n .clone()\n .acquire_many_owned(capped_weight - reserved_weight)\n .await?,\n ))\n }\n}\n\npub struct Options {\n pub max_inflight_rows: Option,\n pub max_inflight_bytes: Option,\n}\n\npub struct ConcurrencyControllerPermit {\n _inflight_count_permit: Option,\n _inflight_bytes_permit: Option,\n}\n\npub struct ConcurrencyController {\n inflight_count_sem: Option>,\n inflight_bytes_sem: Option,\n}\n\npub static BYTES_UNKNOWN_YET: Option usize> = None;\n\nimpl ConcurrencyController {\n pub fn new(exec_options: &Options) -> Self {\n Self {\n inflight_count_sem: exec_options\n .max_inflight_rows\n .map(|max| Arc::new(Semaphore::new(max))),\n inflight_bytes_sem: exec_options.max_inflight_bytes.map(WeightedSemaphore::new),\n }\n }\n\n /// If `bytes_fn` is `None`, it means the number of bytes is not known yet.\n /// The controller will reserve a minimum number of bytes.\n /// The caller should call `acquire_bytes_with_reservation` with the actual number of bytes later.\n pub async fn acquire(\n &self,\n bytes_fn: 
Option usize>,\n ) -> Result {\n let inflight_count_permit = if let Some(sem) = &self.inflight_count_sem {\n Some(sem.clone().acquire_owned().await?)\n } else {\n None\n };\n let inflight_bytes_permit = if let Some(sem) = &self.inflight_bytes_sem {\n if let Some(bytes_fn) = bytes_fn {\n sem.acquire(bytes_fn(), false).await?\n } else {\n Some(sem.acquire_reservation().await?)\n }\n } else {\n None\n };\n Ok(ConcurrencyControllerPermit {\n _inflight_count_permit: inflight_count_permit,\n _inflight_bytes_permit: inflight_bytes_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result, AcquireError> {\n if let Some(sem) = &self.inflight_bytes_sem {\n sem.acquire(bytes_fn(), true).await\n } else {\n Ok(None)\n }\n }\n}\n\npub struct CombinedConcurrencyControllerPermit {\n _permit: ConcurrencyControllerPermit,\n _global_permit: ConcurrencyControllerPermit,\n}\n\npub struct CombinedConcurrencyController {\n controller: ConcurrencyController,\n global_controller: Arc,\n needs_num_bytes: bool,\n}\n\nimpl CombinedConcurrencyController {\n pub fn new(exec_options: &Options, global_controller: Arc) -> Self {\n Self {\n controller: ConcurrencyController::new(exec_options),\n needs_num_bytes: exec_options.max_inflight_bytes.is_some()\n || global_controller.inflight_bytes_sem.is_some(),\n global_controller,\n }\n }\n\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let num_bytes_fn = if let Some(bytes_fn) = bytes_fn\n && self.needs_num_bytes\n {\n let num_bytes = bytes_fn();\n Some(move || num_bytes)\n } else {\n None\n };\n\n let permit = self.controller.acquire(num_bytes_fn).await?;\n let global_permit = self.global_controller.acquire(num_bytes_fn).await?;\n Ok(CombinedConcurrencyControllerPermit {\n _permit: permit,\n _global_permit: global_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result<(Option, Option), 
AcquireError> {\n let num_bytes = bytes_fn();\n let permit = self\n .controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n let global_permit = self\n .global_controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n Ok((permit, global_permit))\n }\n}\n"], ["/cocoindex/src/utils/retryable.rs", "use log::trace;\nuse std::{future::Future, time::Duration};\n\npub trait IsRetryable {\n fn is_retryable(&self) -> bool;\n}\n\npub struct Error {\n error: anyhow::Error,\n is_retryable: bool,\n}\n\nimpl std::fmt::Display for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.error, f)\n }\n}\n\nimpl std::fmt::Debug for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Debug::fmt(&self.error, f)\n }\n}\n\nimpl IsRetryable for Error {\n fn is_retryable(&self) -> bool {\n self.is_retryable\n }\n}\n\nimpl IsRetryable for reqwest::Error {\n fn is_retryable(&self) -> bool {\n self.status() == Some(reqwest::StatusCode::TOO_MANY_REQUESTS)\n }\n}\n\nimpl Error {\n pub fn always_retryable(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: true,\n }\n }\n}\n\nimpl From for Error {\n fn from(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: false,\n }\n }\n}\n\nimpl From for anyhow::Error {\n fn from(val: Error) -> Self {\n val.error\n }\n}\n\nimpl From for Error {\n fn from(error: E) -> Self {\n Self {\n is_retryable: error.is_retryable(),\n error: anyhow::Error::new(error),\n }\n }\n}\n\npub type Result = std::result::Result;\n\n#[allow(non_snake_case)]\npub fn Ok(value: T) -> Result {\n Result::Ok(value)\n}\n\npub struct RetryOptions {\n pub max_retries: Option,\n pub initial_backoff: Duration,\n pub max_backoff: Duration,\n}\n\nimpl Default for RetryOptions {\n fn default() -> Self {\n Self {\n max_retries: Some(10),\n initial_backoff: Duration::from_millis(100),\n max_backoff: Duration::from_secs(10),\n }\n 
}\n}\n\npub static HEAVY_LOADED_OPTIONS: RetryOptions = RetryOptions {\n max_retries: Some(10),\n initial_backoff: Duration::from_secs(1),\n max_backoff: Duration::from_secs(60),\n};\n\npub async fn run<\n Ok,\n Err: std::fmt::Display + IsRetryable,\n Fut: Future>,\n F: Fn() -> Fut,\n>(\n f: F,\n options: &RetryOptions,\n) -> Result {\n let mut retries = 0;\n let mut backoff = options.initial_backoff;\n\n loop {\n match f().await {\n Result::Ok(result) => return Result::Ok(result),\n Result::Err(err) => {\n if !err.is_retryable()\n || options\n .max_retries\n .is_some_and(|max_retries| retries >= max_retries)\n {\n return Result::Err(err);\n }\n retries += 1;\n trace!(\n \"Will retry #{} in {}ms for error: {}\",\n retries,\n backoff.as_millis(),\n err\n );\n tokio::time::sleep(backoff).await;\n if backoff < options.max_backoff {\n backoff = std::cmp::min(\n Duration::from_micros(\n (backoff.as_micros() * rand::random_range(1618..=2000) / 1000) as u64,\n ),\n options.max_backoff,\n );\n }\n }\n }\n }\n}\n"], ["/cocoindex/src/utils/fingerprint.rs", "use anyhow::bail;\nuse base64::prelude::*;\nuse blake2::digest::typenum;\nuse blake2::{Blake2b, Digest};\nuse serde::Deserialize;\nuse serde::ser::{\n Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,\n SerializeTupleStruct, SerializeTupleVariant, Serializer,\n};\n\n#[derive(Debug)]\npub struct FingerprinterError {\n msg: String,\n}\n\nimpl std::fmt::Display for FingerprinterError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"FingerprinterError: {}\", self.msg)\n }\n}\nimpl std::error::Error for FingerprinterError {}\nimpl serde::ser::Error for FingerprinterError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n FingerprinterError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct Fingerprint(pub [u8; 16]);\n\nimpl Fingerprint {\n pub fn to_base64(self) -> String 
{\n BASE64_STANDARD.encode(self.0)\n }\n\n pub fn from_base64(s: &str) -> anyhow::Result {\n let bytes = match s.len() {\n 24 => BASE64_STANDARD.decode(s)?,\n\n // For backward compatibility. Some old version (<= v0.1.2) is using hex encoding.\n 32 => hex::decode(s)?,\n _ => bail!(\"Encoded fingerprint length is unexpected: {}\", s.len()),\n };\n match bytes.try_into() {\n Ok(bytes) => Ok(Fingerprint(bytes)),\n Err(e) => bail!(\"Fingerprint bytes length is unexpected: {}\", e.len()),\n }\n }\n}\n\nimpl Serialize for Fingerprint {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n serializer.serialize_str(&self.to_base64())\n }\n}\n\nimpl<'de> Deserialize<'de> for Fingerprint {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s = String::deserialize(deserializer)?;\n Self::from_base64(&s).map_err(serde::de::Error::custom)\n }\n}\n#[derive(Clone, Default)]\npub struct Fingerprinter {\n hasher: Blake2b,\n}\n\nimpl Fingerprinter {\n pub fn into_fingerprint(self) -> Fingerprint {\n Fingerprint(self.hasher.finalize().into())\n }\n\n pub fn with(self, value: &S) -> Result {\n let mut fingerprinter = self;\n value.serialize(&mut fingerprinter)?;\n Ok(fingerprinter)\n }\n\n pub fn write(&mut self, value: &S) -> Result<(), FingerprinterError> {\n value.serialize(self)\n }\n\n fn write_type_tag(&mut self, tag: &str) {\n self.hasher.update(tag.as_bytes());\n self.hasher.update(b\";\");\n }\n\n fn write_end_tag(&mut self) {\n self.hasher.update(b\".\");\n }\n\n fn write_varlen_bytes(&mut self, bytes: &[u8]) {\n self.write_usize(bytes.len());\n self.hasher.update(bytes);\n }\n\n fn write_usize(&mut self, value: usize) {\n self.hasher.update((value as u32).to_le_bytes());\n }\n}\n\nimpl Serializer for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n type SerializeSeq = Self;\n type SerializeTuple = Self;\n type SerializeTupleStruct = Self;\n type SerializeTupleVariant = 
Self;\n type SerializeMap = Self;\n type SerializeStruct = Self;\n type SerializeStructVariant = Self;\n\n fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {\n self.write_type_tag(if v { \"t\" } else { \"f\" });\n Ok(())\n }\n\n fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {\n self.write_type_tag(\"i1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {\n self.write_type_tag(\"i2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {\n self.write_type_tag(\"i4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {\n self.write_type_tag(\"i8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {\n self.write_type_tag(\"u1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {\n self.write_type_tag(\"u2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {\n self.write_type_tag(\"u4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {\n self.write_type_tag(\"u8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {\n self.write_type_tag(\"f4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {\n self.write_type_tag(\"f8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_char(self, v: char) -> Result<(), Self::Error> {\n self.write_type_tag(\"c\");\n self.write_usize(v as usize);\n Ok(())\n }\n\n fn serialize_str(self, v: &str) -> Result<(), Self::Error> {\n self.write_type_tag(\"s\");\n self.write_varlen_bytes(v.as_bytes());\n Ok(())\n }\n\n fn 
serialize_bytes(self, v: &[u8]) -> Result<(), Self::Error> {\n self.write_type_tag(\"b\");\n self.write_varlen_bytes(v);\n Ok(())\n }\n\n fn serialize_none(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"\");\n Ok(())\n }\n\n fn serialize_some(self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"()\");\n Ok(())\n }\n\n fn serialize_unit_struct(self, name: &'static str) -> Result<(), Self::Error> {\n self.write_type_tag(\"US\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(())\n }\n\n fn serialize_unit_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result<(), Self::Error> {\n self.write_type_tag(\"UV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(())\n }\n\n fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NS\");\n self.write_varlen_bytes(name.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_seq(self, _len: Option) -> Result {\n self.write_type_tag(\"L\");\n Ok(self)\n }\n\n fn serialize_tuple(self, _len: usize) -> Result {\n self.write_type_tag(\"T\");\n Ok(self)\n }\n\n fn serialize_tuple_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TS\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_tuple_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n 
) -> Result {\n self.write_type_tag(\"TV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n self.write_type_tag(\"M\");\n Ok(self)\n }\n\n fn serialize_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"S\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_struct_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"SV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n}\n\nimpl SerializeSeq for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTuple for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n 
self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeMap for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n key.serialize(&mut **self)\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStructVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n"], ["/cocoindex/src/server.rs", "use crate::prelude::*;\n\nuse crate::{lib_context::LibContext, service};\nuse axum::{Router, routing};\nuse tower::ServiceBuilder;\nuse tower_http::{\n cors::{AllowOrigin, CorsLayer},\n trace::TraceLayer,\n};\n\n#[derive(Deserialize, Debug)]\npub struct ServerSettings {\n pub address: String,\n #[serde(default)]\n pub cors_origins: Vec,\n}\n\n/// Initialize the server and return a future that will actually handle requests.\npub async fn init_server(\n lib_context: Arc,\n settings: ServerSettings,\n) -> Result> {\n let mut cors = CorsLayer::default();\n if !settings.cors_origins.is_empty() {\n let origins: 
Vec<_> = settings\n .cors_origins\n .iter()\n .map(|origin| origin.parse())\n .collect::>()?;\n cors = cors\n .allow_origin(AllowOrigin::list(origins))\n .allow_methods([\n axum::http::Method::GET,\n axum::http::Method::POST,\n axum::http::Method::DELETE,\n ])\n .allow_headers([axum::http::header::CONTENT_TYPE]);\n }\n let app = Router::new()\n .route(\n \"/cocoindex\",\n routing::get(|| async { \"CocoIndex is running!\" }),\n )\n .nest(\n \"/cocoindex/api\",\n Router::new()\n .route(\"/flows\", routing::get(service::flows::list_flows))\n .route(\n \"/flows/{flowInstName}\",\n routing::get(service::flows::get_flow),\n )\n .route(\n \"/flows/{flowInstName}/schema\",\n routing::get(service::flows::get_flow_schema),\n )\n .route(\n \"/flows/{flowInstName}/keys\",\n routing::get(service::flows::get_keys),\n )\n .route(\n \"/flows/{flowInstName}/data\",\n routing::get(service::flows::evaluate_data),\n )\n .route(\n \"/flows/{flowInstName}/rowStatus\",\n routing::get(service::flows::get_row_indexing_status),\n )\n .route(\n \"/flows/{flowInstName}/update\",\n routing::post(service::flows::update),\n )\n .layer(\n ServiceBuilder::new()\n .layer(TraceLayer::new_for_http())\n .layer(cors),\n )\n .with_state(lib_context.clone()),\n );\n\n let listener = tokio::net::TcpListener::bind(&settings.address)\n .await\n .context(format!(\"Failed to bind to address: {}\", settings.address))?;\n\n println!(\n \"Server running at http://{}/cocoindex\",\n listener.local_addr()?\n );\n let serve_fut = async { axum::serve(listener, app).await.unwrap() };\n Ok(serve_fut.boxed())\n}\n"], ["/cocoindex/src/execution/indexing_status.rs", "use crate::prelude::*;\n\nuse super::db_tracking;\nuse super::evaluator;\nuse futures::try_join;\n\n#[derive(Debug, Serialize)]\npub struct SourceRowLastProcessedInfo {\n pub source_ordinal: interface::Ordinal,\n pub processing_time: Option>,\n pub is_logic_current: bool,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowInfo {\n pub ordinal: 
interface::Ordinal,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowIndexingStatus {\n pub last_processed: Option,\n pub current: Option,\n}\n\npub async fn get_source_row_indexing_status(\n src_eval_ctx: &evaluator::SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n pool: &sqlx::PgPool,\n) -> Result {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let last_processed_fut = db_tracking::read_source_last_processed_info(\n setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n let current_fut = src_eval_ctx.import_op.executor.get_value(\n src_eval_ctx.key,\n &interface::SourceExecutorGetOptions {\n include_value: false,\n include_ordinal: true,\n },\n );\n let (last_processed, current) = try_join!(last_processed_fut, current_fut)?;\n\n let last_processed = last_processed.map(|l| SourceRowLastProcessedInfo {\n source_ordinal: interface::Ordinal(l.processed_source_ordinal),\n processing_time: l\n .process_time_micros\n .and_then(chrono::DateTime::::from_timestamp_micros),\n is_logic_current: Some(src_eval_ctx.plan.logic_fingerprint.0.as_slice())\n == l.process_logic_fingerprint.as_deref(),\n });\n let current = SourceRowInfo {\n ordinal: current\n .ordinal\n .ok_or(anyhow::anyhow!(\"Ordinal is unavailable for the source\"))?,\n };\n Ok(SourceRowIndexingStatus {\n last_processed,\n current: Some(current),\n })\n}\n"], ["/cocoindex/src/llm/mod.rs", "use crate::prelude::*;\n\nuse crate::base::json_schema::ToJsonSchemaOptions;\nuse infer::Infer;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\nstatic INFER: LazyLock = LazyLock::new(Infer::new);\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub enum LlmApiType {\n Ollama,\n OpenAi,\n Gemini,\n Anthropic,\n LiteLlm,\n OpenRouter,\n Voyage,\n Vllm,\n VertexAi,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct 
VertexAiConfig {\n pub project: String,\n pub region: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum LlmApiConfig {\n VertexAi(VertexAiConfig),\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LlmSpec {\n pub api_type: LlmApiType,\n pub address: Option,\n pub model: String,\n pub api_config: Option,\n}\n\n#[derive(Debug)]\npub enum OutputFormat<'a> {\n JsonSchema {\n name: Cow<'a, str>,\n schema: Cow<'a, SchemaObject>,\n },\n}\n\n#[derive(Debug)]\npub struct LlmGenerateRequest<'a> {\n pub model: &'a str,\n pub system_prompt: Option>,\n pub user_prompt: Cow<'a, str>,\n pub image: Option>,\n pub output_format: Option>,\n}\n\n#[derive(Debug)]\npub struct LlmGenerateResponse {\n pub text: String,\n}\n\n#[async_trait]\npub trait LlmGenerationClient: Send + Sync {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result;\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions;\n}\n\n#[derive(Debug)]\npub struct LlmEmbeddingRequest<'a> {\n pub model: &'a str,\n pub text: Cow<'a, str>,\n pub output_dimension: Option,\n pub task_type: Option>,\n}\n\npub struct LlmEmbeddingResponse {\n pub embedding: Vec,\n}\n\n#[async_trait]\npub trait LlmEmbeddingClient: Send + Sync {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result;\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option;\n}\n\nmod anthropic;\nmod gemini;\nmod litellm;\nmod ollama;\nmod openai;\nmod openrouter;\nmod vllm;\nmod voyage;\n\npub async fn new_llm_generation_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) 
as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::Anthropic => {\n Box::new(anthropic::Client::new(address).await?) as Box\n }\n LlmApiType::LiteLlm => {\n Box::new(litellm::Client::new_litellm(address).await?) as Box\n }\n LlmApiType::OpenRouter => Box::new(openrouter::Client::new_openrouter(address).await?)\n as Box,\n LlmApiType::Voyage => {\n api_bail!(\"Voyage is not supported for generation\")\n }\n LlmApiType::Vllm => {\n Box::new(vllm::Client::new_vllm(address).await?) as Box\n }\n };\n Ok(client)\n}\n\npub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) 
as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}\n\npub fn detect_image_mime_type(bytes: &[u8]) -> Result<&'static str> {\n let infer = &*INFER;\n match infer.get(bytes) {\n Some(info) if info.mime_type().starts_with(\"image/\") => Ok(info.mime_type()),\n _ => bail!(\"Unknown or unsupported image format\"),\n }\n}\n"], ["/cocoindex/src/execution/stats.rs", "use crate::prelude::*;\n\nuse std::{\n ops::AddAssign,\n sync::atomic::{AtomicI64, Ordering::Relaxed},\n};\n\n#[derive(Default, Serialize)]\npub struct Counter(pub AtomicI64);\n\nimpl Counter {\n pub fn inc(&self, by: i64) {\n self.0.fetch_add(by, Relaxed);\n }\n\n pub fn get(&self) -> i64 {\n self.0.load(Relaxed)\n }\n\n pub fn delta(&self, base: &Self) -> Counter {\n Counter(AtomicI64::new(self.get() - base.get()))\n }\n\n pub fn into_inner(self) -> i64 {\n self.0.into_inner()\n }\n\n pub fn merge(&self, delta: &Self) {\n self.0.fetch_add(delta.get(), Relaxed);\n }\n}\n\nimpl AddAssign for Counter {\n fn add_assign(&mut self, rhs: Self) {\n self.0.fetch_add(rhs.into_inner(), Relaxed);\n }\n}\n\nimpl Clone for Counter {\n fn clone(&self) -> Self {\n Self(AtomicI64::new(self.get()))\n }\n}\n\nimpl std::fmt::Display for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\nimpl std::fmt::Debug for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\n#[derive(Debug, Serialize, Default, Clone)]\npub struct UpdateStats {\n pub num_no_change: Counter,\n pub num_insertions: Counter,\n pub num_deletions: Counter,\n /// Number of source rows that were updated.\n pub num_updates: Counter,\n /// Number of source rows that were 
reprocessed because of logic change.\n pub num_reprocesses: Counter,\n pub num_errors: Counter,\n}\n\nimpl UpdateStats {\n pub fn delta(&self, base: &Self) -> Self {\n UpdateStats {\n num_no_change: self.num_no_change.delta(&base.num_no_change),\n num_insertions: self.num_insertions.delta(&base.num_insertions),\n num_deletions: self.num_deletions.delta(&base.num_deletions),\n num_updates: self.num_updates.delta(&base.num_updates),\n num_reprocesses: self.num_reprocesses.delta(&base.num_reprocesses),\n num_errors: self.num_errors.delta(&base.num_errors),\n }\n }\n\n pub fn merge(&self, delta: &Self) {\n self.num_no_change.merge(&delta.num_no_change);\n self.num_insertions.merge(&delta.num_insertions);\n self.num_deletions.merge(&delta.num_deletions);\n self.num_updates.merge(&delta.num_updates);\n self.num_reprocesses.merge(&delta.num_reprocesses);\n self.num_errors.merge(&delta.num_errors);\n }\n\n pub fn has_any_change(&self) -> bool {\n self.num_insertions.get() > 0\n || self.num_deletions.get() > 0\n || self.num_updates.get() > 0\n || self.num_reprocesses.get() > 0\n || self.num_errors.get() > 0\n }\n}\n\nimpl std::fmt::Display for UpdateStats {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let mut messages = Vec::new();\n let num_errors = self.num_errors.get();\n if num_errors > 0 {\n messages.push(format!(\"{num_errors} source rows FAILED\"));\n }\n\n let num_skipped = self.num_no_change.get();\n if num_skipped > 0 {\n messages.push(format!(\"{num_skipped} source rows NO CHANGE\"));\n }\n\n let num_insertions = self.num_insertions.get();\n let num_deletions = self.num_deletions.get();\n let num_updates = self.num_updates.get();\n let num_reprocesses = self.num_reprocesses.get();\n let num_source_rows = num_insertions + num_deletions + num_updates + num_reprocesses;\n if num_source_rows > 0 {\n messages.push(format!(\n \"{num_source_rows} source rows processed ({num_insertions} ADDED, {num_deletions} REMOVED, {num_updates} UPDATED, 
{num_reprocesses} REPROCESSED on flow change)\",\n ));\n }\n\n if !messages.is_empty() {\n write!(f, \"{}\", messages.join(\"; \"))?;\n } else {\n write!(f, \"No changes\")?;\n }\n\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceUpdateInfo {\n pub source_name: String,\n pub stats: UpdateStats,\n}\n\nimpl std::fmt::Display for SourceUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.source_name, self.stats)\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct IndexUpdateInfo {\n pub sources: Vec,\n}\n\nimpl std::fmt::Display for IndexUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n for source in self.sources.iter() {\n writeln!(f, \"{source}\")?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/builder/analyzed_flow.rs", "use crate::{ops::interface::FlowInstanceContext, prelude::*};\n\nuse super::{analyzer, plan};\nuse crate::service::error::{SharedError, SharedResultExt, shared_ok};\n\npub struct AnalyzedFlow {\n pub flow_instance: spec::FlowInstanceSpec,\n pub data_schema: schema::FlowSchema,\n pub setup_state: exec_ctx::AnalyzedSetupState,\n\n pub flow_instance_ctx: Arc,\n\n /// It's None if the flow is not up to date\n pub execution_plan: Shared, SharedError>>>,\n}\n\nimpl AnalyzedFlow {\n pub async fn from_flow_instance(\n flow_instance: crate::base::spec::FlowInstanceSpec,\n flow_instance_ctx: Arc,\n ) -> Result {\n let (data_schema, setup_state, execution_plan_fut) =\n analyzer::analyze_flow(&flow_instance, flow_instance_ctx.clone()).await?;\n let execution_plan = async move {\n shared_ok(Arc::new(\n execution_plan_fut.await.map_err(SharedError::new)?,\n ))\n }\n .boxed()\n .shared();\n let result = Self {\n flow_instance,\n data_schema,\n setup_state,\n flow_instance_ctx,\n execution_plan,\n };\n Ok(result)\n }\n\n pub async fn get_execution_plan(&self) -> Result> {\n let execution_plan = self.execution_plan.clone().await.std_result()?;\n 
Ok(execution_plan)\n }\n}\n\npub struct AnalyzedTransientFlow {\n pub transient_flow_instance: spec::TransientFlowSpec,\n pub data_schema: schema::FlowSchema,\n pub execution_plan: plan::TransientExecutionPlan,\n pub output_type: schema::EnrichedValueType,\n}\n\nimpl AnalyzedTransientFlow {\n pub async fn from_transient_flow(\n transient_flow: spec::TransientFlowSpec,\n py_exec_ctx: Option,\n ) -> Result {\n let ctx = analyzer::build_flow_instance_context(&transient_flow.name, py_exec_ctx);\n let (output_type, data_schema, execution_plan_fut) =\n analyzer::analyze_transient_flow(&transient_flow, ctx).await?;\n Ok(Self {\n transient_flow_instance: transient_flow,\n data_schema,\n execution_plan: execution_plan_fut.await?,\n output_type,\n })\n }\n}\n"], ["/cocoindex/src/service/error.rs", "use crate::prelude::*;\n\nuse axum::{\n Json,\n http::StatusCode,\n response::{IntoResponse, Response},\n};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse std::{\n error::Error,\n fmt::{Debug, Display},\n};\n\n#[derive(Debug)]\npub struct ApiError {\n pub err: anyhow::Error,\n pub status_code: StatusCode,\n}\n\nimpl ApiError {\n pub fn new(message: &str, status_code: StatusCode) -> Self {\n Self {\n err: anyhow!(\"{}\", message),\n status_code,\n }\n }\n}\n\nimpl Display for ApiError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl Error for ApiError {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.err.source()\n }\n}\n\n#[derive(Serialize)]\nstruct ErrorResponse {\n error: String,\n}\n\nimpl IntoResponse for ApiError {\n fn into_response(self) -> Response {\n debug!(\"Internal server error:\\n{:?}\", self.err);\n let error_response = ErrorResponse {\n error: self.err.to_string(),\n };\n (self.status_code, Json(error_response)).into_response()\n }\n}\n\nimpl From for ApiError {\n fn from(err: anyhow::Error) -> ApiError {\n if err.is::() {\n return err.downcast::().unwrap();\n }\n Self {\n err,\n 
status_code: StatusCode::INTERNAL_SERVER_ERROR,\n }\n }\n}\n\nimpl From for PyErr {\n fn from(val: ApiError) -> Self {\n PyException::new_err(val.err.to_string())\n }\n}\n\n#[derive(Clone)]\npub struct SharedError {\n pub err: Arc,\n}\n\nimpl SharedError {\n pub fn new(err: anyhow::Error) -> Self {\n Self { err: Arc::new(err) }\n }\n}\nimpl Debug for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.err, f)\n }\n}\n\nimpl Display for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl From for SharedError {\n fn from(err: E) -> Self {\n Self {\n err: Arc::new(anyhow::Error::from(err)),\n }\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + Send + Sync + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\npub fn shared_ok(value: T) -> Result {\n Ok(value)\n}\n\npub type SharedResult = Result;\n\npub struct SharedErrorWrapper(SharedError);\n\nimpl Display for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Debug for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Error for SharedErrorWrapper {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.0.err.as_ref().source()\n }\n}\n\npub trait SharedResultExt {\n fn std_result(self) -> Result;\n}\n\nimpl SharedResultExt for Result {\n fn std_result(self) -> Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err)),\n }\n }\n}\n\npub trait SharedResultExtRef<'a, T> {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper>;\n}\n\nimpl<'a, T> SharedResultExtRef<'a, T> for &'a Result {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper> 
{\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err.clone())),\n }\n }\n}\n\npub fn invariance_violation() -> anyhow::Error {\n anyhow::anyhow!(\"Invariance violation\")\n}\n\n#[macro_export]\nmacro_rules! api_bail {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n return Err($crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST).into())\n };\n}\n\n#[macro_export]\nmacro_rules! api_error {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n $crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST)\n };\n}\n"], ["/cocoindex/src/utils/db.rs", "#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidIdentifier(pub String);\n\nimpl TryFrom for ValidIdentifier {\n type Error = anyhow::Error;\n\n fn try_from(s: String) -> Result {\n if !s.is_empty() && s.chars().all(|c| c.is_alphanumeric() || c == '_') {\n Ok(ValidIdentifier(s))\n } else {\n Err(anyhow::anyhow!(\"Invalid identifier: {s:?}\"))\n }\n }\n}\n\nimpl std::fmt::Display for ValidIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.0, f)\n }\n}\n\nimpl std::ops::Deref for ValidIdentifier {\n type Target = String;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\npub enum WriteAction {\n Insert,\n Update,\n}\n\npub fn sanitize_identifier(s: &str) -> String {\n let mut result = String::new();\n for c in s.chars() {\n if c.is_alphanumeric() || c == '_' {\n result.push(c);\n } else {\n result.push_str(\"__\");\n }\n }\n result\n}\n"], ["/cocoindex/src/settings.rs", "use serde::Deserialize;\n\n#[derive(Deserialize, Debug)]\npub struct DatabaseConnectionSpec {\n pub url: String,\n pub user: Option,\n pub password: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct GlobalExecutionOptions {\n pub source_max_inflight_rows: Option,\n pub source_max_inflight_bytes: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub 
struct Settings {\n #[serde(default)]\n pub database: Option,\n #[serde(default)]\n #[allow(dead_code)] // Used via serialization/deserialization to Python\n pub app_namespace: String,\n #[serde(default)]\n pub global_execution_options: GlobalExecutionOptions,\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_settings_deserialize_with_database() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n },\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, Some(\"testuser\".to_string()));\n assert_eq!(db.password, Some(\"testpass\".to_string()));\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_without_database() {\n let json = r#\"{\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_empty_object() {\n let json = r#\"{}\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_settings_deserialize_database_without_user_password() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\"\n }\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, None);\n assert_eq!(db.password, None);\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn 
test_database_connection_spec_deserialize() {\n let json = r#\"{\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n }\"#;\n\n let db_spec: DatabaseConnectionSpec = serde_json::from_str(json).unwrap();\n\n assert_eq!(db_spec.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db_spec.user, Some(\"testuser\".to_string()));\n assert_eq!(db_spec.password, Some(\"testpass\".to_string()));\n }\n}\n"], ["/cocoindex/src/setup/auth_registry.rs", "use std::collections::hash_map;\n\nuse crate::prelude::*;\n\npub struct AuthRegistry {\n entries: RwLock>,\n}\n\nimpl Default for AuthRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl AuthRegistry {\n pub fn new() -> Self {\n Self {\n entries: RwLock::new(HashMap::new()),\n }\n }\n\n pub fn add(&self, key: String, value: serde_json::Value) -> Result<()> {\n let mut entries = self.entries.write().unwrap();\n match entries.entry(key) {\n hash_map::Entry::Occupied(entry) => {\n api_bail!(\"Auth entry already exists: {}\", entry.key());\n }\n hash_map::Entry::Vacant(entry) => {\n entry.insert(value);\n }\n }\n Ok(())\n }\n\n pub fn get(&self, entry_ref: &spec::AuthEntryReference) -> Result {\n let entries = self.entries.read().unwrap();\n match entries.get(&entry_ref.key) {\n Some(value) => Ok(serde_json::from_value(value.clone())?),\n None => api_bail!(\n \"Auth entry `{key}` not found.\\n\\\n Hint: If you're not referencing `{key}` in your flow, it will likely be caused by a previously persisted target using it. \\\n You need to bring back the definition for the auth entry `{key}`, so that CocoIndex will be able to do a cleanup in the next `setup` run. 
\\\n See https://cocoindex.io/docs/core/flow_def#auth-registry for more details.\",\n key = entry_ref.key\n ),\n }\n }\n}\n"], ["/cocoindex/src/builder/plan.rs", "use crate::prelude::*;\n\nuse crate::ops::interface::*;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalFieldReference {\n /// Must be non-empty.\n pub fields_idx: Vec,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedFieldReference {\n pub local: AnalyzedLocalFieldReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalCollectorReference {\n pub collector_idx: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedCollectorReference {\n pub local: AnalyzedLocalCollectorReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct AnalyzedStructMapping {\n pub fields: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\")]\npub enum AnalyzedValueMapping {\n Constant { value: value::Value },\n Field(AnalyzedFieldReference),\n Struct(AnalyzedStructMapping),\n}\n\n#[derive(Debug, Clone)]\npub struct AnalyzedOpOutput {\n pub field_idx: u32,\n}\n\npub struct AnalyzedImportOp {\n pub name: String,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n pub primary_key_type: schema::ValueType,\n pub refresh_options: spec::SourceRefreshOptions,\n\n pub concurrency_controller: concur_control::CombinedConcurrencyController,\n}\n\npub struct AnalyzedFunctionExecInfo {\n pub enable_cache: bool,\n pub behavior_version: Option,\n\n /// Fingerprinter of the function's behavior.\n pub fingerprinter: 
Fingerprinter,\n /// To deserialize cached value.\n pub output_type: schema::ValueType,\n}\n\npub struct AnalyzedTransformOp {\n pub name: String,\n pub inputs: Vec,\n pub function_exec_info: AnalyzedFunctionExecInfo,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n}\n\npub struct AnalyzedForEachOp {\n pub name: String,\n pub local_field_ref: AnalyzedLocalFieldReference,\n pub op_scope: AnalyzedOpScope,\n pub concurrency_controller: concur_control::ConcurrencyController,\n}\n\npub struct AnalyzedCollectOp {\n pub name: String,\n pub has_auto_uuid_field: bool,\n pub input: AnalyzedStructMapping,\n pub collector_ref: AnalyzedCollectorReference,\n /// Fingerprinter of the collector's schema. Used to decide when to reuse auto-generated UUIDs.\n pub fingerprinter: Fingerprinter,\n}\n\npub enum AnalyzedPrimaryKeyDef {\n Fields(Vec),\n}\n\npub struct AnalyzedExportOp {\n pub name: String,\n pub input: AnalyzedLocalCollectorReference,\n pub export_target_factory: Arc,\n pub export_context: Arc,\n pub primary_key_def: AnalyzedPrimaryKeyDef,\n pub primary_key_type: schema::ValueType,\n /// idx for value fields - excluding the primary key field.\n pub value_fields: Vec,\n /// If true, value is never changed on the same primary key.\n /// This is guaranteed if the primary key contains auto-generated UUIDs.\n pub value_stable: bool,\n}\n\npub struct AnalyzedExportTargetOpGroup {\n pub target_factory: Arc,\n pub op_idx: Vec,\n}\n\npub enum AnalyzedReactiveOp {\n Transform(AnalyzedTransformOp),\n ForEach(AnalyzedForEachOp),\n Collect(AnalyzedCollectOp),\n}\n\npub struct AnalyzedOpScope {\n pub reactive_ops: Vec,\n pub collector_len: usize,\n}\n\npub struct ExecutionPlan {\n pub logic_fingerprint: Fingerprint,\n\n pub import_ops: Vec,\n pub op_scope: AnalyzedOpScope,\n pub export_ops: Vec,\n pub export_op_groups: Vec,\n}\n\npub struct TransientExecutionPlan {\n pub input_fields: Vec,\n pub op_scope: AnalyzedOpScope,\n pub output_value: AnalyzedValueMapping,\n}\n\nfn 
u32_is_zero(v: &u32) -> bool {\n *v == 0\n}\n"], ["/cocoindex/src/utils/immutable.rs", "#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]\npub enum RefList<'a, T> {\n #[default]\n Nil,\n\n Cons(T, &'a RefList<'a, T>),\n}\n\nimpl<'a, T> RefList<'a, T> {\n pub fn prepend(&'a self, head: T) -> Self {\n Self::Cons(head, self)\n }\n\n pub fn iter(&'a self) -> impl Iterator {\n self\n }\n\n pub fn head(&'a self) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, _) => Some(head),\n }\n }\n\n pub fn headn(&'a self, n: usize) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n if n == 0 {\n Some(head)\n } else {\n tail.headn(n - 1)\n }\n }\n }\n }\n\n pub fn tail(&'a self) -> Option<&'a RefList<'a, T>> {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => Some(tail),\n }\n }\n\n pub fn tailn(&'a self, n: usize) -> Option<&'a RefList<'a, T>> {\n if n == 0 {\n Some(self)\n } else {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => tail.tailn(n - 1),\n }\n }\n }\n}\n\nimpl<'a, T> Iterator for &'a RefList<'a, T> {\n type Item = &'a T;\n\n fn next(&mut self) -> Option {\n let current = *self;\n match current {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n *self = *tail;\n Some(head)\n }\n }\n }\n}\n"], ["/cocoindex/src/ops/registration.rs", "use super::{\n factory_bases::*, functions, registry::ExecutorFactoryRegistry, sdk::ExecutorFactory, sources,\n targets,\n};\nuse anyhow::Result;\nuse std::sync::{LazyLock, RwLock};\n\nfn register_executor_factories(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n let reqwest_client = reqwest::Client::new();\n\n sources::local_file::Factory.register(registry)?;\n sources::google_drive::Factory.register(registry)?;\n sources::amazon_s3::Factory.register(registry)?;\n sources::azure_blob::Factory.register(registry)?;\n\n functions::parse_json::Factory.register(registry)?;\n 
functions::split_recursively::register(registry)?;\n functions::extract_by_llm::Factory.register(registry)?;\n functions::embed_text::register(registry)?;\n\n targets::postgres::Factory::default().register(registry)?;\n targets::qdrant::register(registry)?;\n targets::kuzu::register(registry, reqwest_client)?;\n\n targets::neo4j::Factory::new().register(registry)?;\n\n Ok(())\n}\n\nstatic EXECUTOR_FACTORY_REGISTRY: LazyLock> = LazyLock::new(|| {\n let mut registry = ExecutorFactoryRegistry::new();\n register_executor_factories(&mut registry).expect(\"Failed to register executor factories\");\n RwLock::new(registry)\n});\n\npub fn get_optional_executor_factory(kind: &str) -> Option {\n let registry = EXECUTOR_FACTORY_REGISTRY.read().unwrap();\n registry.get(kind).cloned()\n}\n\npub fn get_executor_factory(kind: &str) -> Result {\n get_optional_executor_factory(kind)\n .ok_or_else(|| anyhow::anyhow!(\"Executor factory not found for op kind: {}\", kind))\n}\n\npub fn register_factory(name: String, factory: ExecutorFactory) -> Result<()> {\n let mut registry = EXECUTOR_FACTORY_REGISTRY.write().unwrap();\n registry.register(name, factory)\n}\n"], ["/cocoindex/src/ops/registry.rs", "use super::interface::ExecutorFactory;\nuse anyhow::Result;\nuse std::collections::HashMap;\n\npub struct ExecutorFactoryRegistry {\n factories: HashMap,\n}\n\nimpl Default for ExecutorFactoryRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl ExecutorFactoryRegistry {\n pub fn new() -> Self {\n Self {\n factories: HashMap::new(),\n }\n }\n\n pub fn register(&mut self, name: String, factory: ExecutorFactory) -> Result<()> {\n match self.factories.entry(name) {\n std::collections::hash_map::Entry::Occupied(entry) => Err(anyhow::anyhow!(\n \"Factory with name already exists: {}\",\n entry.key()\n )),\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(factory);\n Ok(())\n }\n }\n }\n\n pub fn get(&self, name: &str) -> Option<&ExecutorFactory> {\n 
self.factories.get(name)\n }\n}\n"], ["/cocoindex/src/llm/vllm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_vllm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:8000/v1\".to_string());\n let api_key = std::env::var(\"VLLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/litellm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_litellm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:4000\".to_string());\n let api_key = std::env::var(\"LITELLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/openrouter.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_openrouter(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"https://openrouter.ai/api/v1\".to_string());\n let api_key = std::env::var(\"OPENROUTER_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/prelude.rs", "#![allow(unused_imports)]\n\npub(crate) use anyhow::{Context, Result};\npub(crate) use async_trait::async_trait;\npub(crate) use chrono::{DateTime, 
Utc};\npub(crate) use futures::{FutureExt, StreamExt};\npub(crate) use futures::{\n future::{BoxFuture, Shared},\n prelude::*,\n stream::BoxStream,\n};\npub(crate) use indexmap::{IndexMap, IndexSet};\npub(crate) use itertools::Itertools;\npub(crate) use serde::{Deserialize, Serialize, de::DeserializeOwned};\npub(crate) use std::any::Any;\npub(crate) use std::borrow::Cow;\npub(crate) use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};\npub(crate) use std::hash::Hash;\npub(crate) use std::sync::{Arc, LazyLock, Mutex, OnceLock, RwLock, Weak};\n\npub(crate) use crate::base::{self, schema, spec, value};\npub(crate) use crate::builder::{self, exec_ctx, plan};\npub(crate) use crate::execution;\npub(crate) use crate::lib_context::{FlowContext, LibContext, get_lib_context, get_runtime};\npub(crate) use crate::ops::interface;\npub(crate) use crate::service::error::{ApiError, invariance_violation};\npub(crate) use crate::setup;\npub(crate) use crate::setup::AuthRegistry;\npub(crate) use crate::utils::{self, concur_control, retryable};\npub(crate) use crate::{api_bail, api_error};\n\npub(crate) use anyhow::{anyhow, bail};\npub(crate) use async_stream::{stream, try_stream};\npub(crate) use log::{debug, error, info, trace, warn};\n\npub(crate) use derivative::Derivative;\n"], ["/cocoindex/src/base/field_attrs.rs", "use const_format::concatcp;\n\npub static COCOINDEX_PREFIX: &str = \"cocoindex.io/\";\n\n/// Present for bytes and str. It points to fields that represents the original file name for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_FILENAME: &str = concatcp!(COCOINDEX_PREFIX, \"content_filename\");\n\n/// Present for bytes and str. It points to fields that represents mime types for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_MIME_TYPE: &str = concatcp!(COCOINDEX_PREFIX, \"content_mime_type\");\n\n/// Present for chunks. 
It points to fields that the chunks are for.\n/// Type: AnalyzedValueMapping\npub static CHUNK_BASE_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"chunk_base_text\");\n\n/// Base text for an embedding vector.\npub static _EMBEDDING_ORIGIN_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"embedding_origin_text\");\n"], ["/cocoindex/src/ops/sources/mod.rs", "pub mod amazon_s3;\npub mod azure_blob;\npub mod google_drive;\npub mod local_file;\n"], ["/cocoindex/src/base/mod.rs", "pub mod duration;\npub mod field_attrs;\npub mod json_schema;\npub mod schema;\npub mod spec;\npub mod value;\n"], ["/cocoindex/src/lib.rs", "mod base;\nmod builder;\nmod execution;\nmod lib_context;\nmod llm;\nmod ops;\nmod prelude;\nmod py;\nmod server;\nmod service;\nmod settings;\nmod setup;\nmod utils;\n"], ["/cocoindex/src/builder/mod.rs", "pub mod analyzer;\npub mod exec_ctx;\npub mod flow_builder;\npub mod plan;\n\nmod analyzed_flow;\n\npub use analyzed_flow::AnalyzedFlow;\npub use analyzed_flow::AnalyzedTransientFlow;\n"], ["/cocoindex/src/ops/mod.rs", "pub mod interface;\npub mod registry;\n\n// All operations\nmod factory_bases;\nmod functions;\nmod sources;\nmod targets;\n\nmod registration;\npub(crate) use registration::*;\npub(crate) mod py_factory;\n\n// SDK is used for help registration for operations.\nmod sdk;\n"], ["/cocoindex/src/ops/functions/mod.rs", "pub mod embed_text;\npub mod extract_by_llm;\npub mod parse_json;\npub mod split_recursively;\n\n#[cfg(test)]\nmod test_utils;\n"], ["/cocoindex/src/setup/mod.rs", "mod auth_registry;\nmod db_metadata;\nmod driver;\nmod states;\n\npub mod components;\n\npub use auth_registry::AuthRegistry;\npub use driver::*;\npub use states::*;\n"], ["/cocoindex/src/execution/mod.rs", "pub(crate) mod db_tracking_setup;\npub(crate) mod dumper;\npub(crate) mod evaluator;\npub(crate) mod indexing_status;\npub(crate) mod memoization;\npub(crate) mod row_indexer;\npub(crate) mod source_indexer;\npub(crate) mod stats;\n\nmod live_updater;\npub(crate) use 
live_updater::*;\n\nmod db_tracking;\n"], ["/cocoindex/src/service/mod.rs", "pub(crate) mod error;\npub(crate) mod flows;\n"], ["/cocoindex/src/ops/targets/mod.rs", "mod shared;\n\npub mod kuzu;\npub mod neo4j;\npub mod postgres;\npub mod qdrant;\n"], ["/cocoindex/src/utils/mod.rs", "pub mod concur_control;\npub mod db;\npub mod fingerprint;\npub mod immutable;\npub mod retryable;\npub mod yaml_ser;\n"], ["/cocoindex/src/ops/targets/shared/mod.rs", "pub mod property_graph;\npub mod table_columns;\n"]], "task_instance_info": {"created_time": "2025-08-21 01:30:28", "created_task_model": "DeepSeek-R1", "function_skeleton": "async fn poll_sqs (&self, sqs_context: &Arc) -> Result> {}", "code_description": "This code segment functions to: Monitor a message queue for storage service event notifications, filter relevant object change events based on configured criteria, convert encoded resource identifiers, and manage message lifecycle with acknowledgment handling for processed events."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\n async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context.client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n\n let mut changes = Vec::new();\n for msg in resp.messages.unwrap_or_default() {\n let body = msg.body().unwrap_or_default();\n let event: S3EventNotification = serde_json::from_str(body)?;\n \n for record in event.records {\n let Some(s3) = record.s3 else { continue };\n let key = decode_form_encoded_url(&s3.object.key)?;\n \n let variant = match record.event_name.as_str() {\n \"ObjectCreated:Put\" => SourceChangeVariant::Add,\n \"ObjectRemoved:Delete\" => SourceChangeVariant::Delete,\n _ => continue,\n };\n\n changes.push(SourceChangeMessage {\n variant,\n key: KeyValue::Str(key),\n bucket: s3.bucket.name,\n });\n }\n\n if let Some(receipt_handle) = 
msg.receipt_handle() {\n sqs_context.delete_message(receipt_handle.to_string()).await?;\n }\n }\n \n Ok(changes)\n }\n[TASK_END]\n```", "inference_time": "2025-08-21 01-31-08"}, "editdistance_info": {"edit_distance": 37.6744, "calculate_time": "2025-08-21 01:31:08", "true_code_clean": "async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n .is_none_or(|prefix| s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }", "predict_code_clean": "async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context.client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n 
.max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let mut changes = Vec::new();\n for msg in resp.messages.unwrap_or_default() {\n let body = msg.body().unwrap_or_default();\n let event: S3EventNotification = serde_json::from_str(body)?;\n for record in event.records {\n let Some(s3) = record.s3 else { continue };\n let key = decode_form_encoded_url(&s3.object.key)?;\n let variant = match record.event_name.as_str() {\n \"ObjectCreated:Put\" => SourceChangeVariant::Add,\n \"ObjectRemoved:Delete\" => SourceChangeVariant::Delete,\n _ => continue,\n };\n changes.push(SourceChangeMessage {\n variant,\n key: KeyValue::Str(key),\n bucket: s3.bucket.name,\n });\n }\n if let Some(receipt_handle) = msg.receipt_handle() {\n sqs_context.delete_message(receipt_handle.to_string()).await?;\n }\n }\n Ok(changes)\n }"}} {"repo_name": "cocoindex", "file_name": "/cocoindex/src/execution/db_tracking_setup.rs", "inference_info": {"prefix_code": "use crate::prelude::*;\n\nuse crate::setup::{CombinedState, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType};\nuse serde::{Deserialize, Serialize};\nuse sqlx::PgPool;\n\npub fn default_tracking_table_name(flow_name: &str) -> String {\n format!(\n \"{}__cocoindex_tracking\",\n utils::db::sanitize_identifier(flow_name)\n )\n}\n\npub const CURRENT_TRACKING_TABLE_VERSION: i32 = 1;\n\nasync fn upgrade_tracking_table(\n pool: &PgPool,\n table_name: &str,\n existing_version_id: i32,\n target_version_id: i32,\n) -> Result<()> {\n if existing_version_id < 1 && target_version_id >= 1 {\n let query = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} (\n source_id INTEGER NOT NULL,\n source_key JSONB NOT NULL,\n\n -- Update in the precommit phase: after evaluation done, before really applying the changes to the target storage.\n max_process_ordinal BIGINT NOT NULL,\n staging_target_keys JSONB NOT NULL,\n memoization_info JSONB,\n\n -- Update after applying the changes to the target storage.\n processed_source_ordinal 
BIGINT,\n process_logic_fingerprint BYTEA,\n process_ordinal BIGINT,\n process_time_micros BIGINT,\n target_keys JSONB,\n\n PRIMARY KEY (source_id, source_key)\n );\",\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TrackingTableSetupState {\n pub table_name: String,\n pub version_id: i32,\n}\n\n#[derive(Debug)]\npub struct TrackingTableSetupStatus {\n pub desired_state: Option,\n\n pub legacy_table_names: Vec,\n\n pub min_existing_version_id: Option,\n pub source_ids_to_delete: Vec,\n}\n\nimpl TrackingTableSetupStatus {\n pub fn new(\n desired: Option<&TrackingTableSetupState>,\n existing: &CombinedState,\n source_ids_to_delete: Vec,\n ) -> Option {\n let legacy_table_names = existing\n .legacy_values(desired, |v| &v.table_name)\n .into_iter()\n .cloned()\n .collect();\n let min_existing_version_id = existing\n .always_exists()\n .then(|| existing.possible_versions().map(|v| v.version_id).min())\n .flatten();\n if desired.is_some() || min_existing_version_id.is_some() {\n Some(Self {\n desired_state: desired.cloned(),\n legacy_table_names,\n min_existing_version_id,\n source_ids_to_delete,\n })\n } else {\n None\n }\n }\n\n pub fn into_setup_info(\n self,\n ) -> ResourceSetupInfo<(), TrackingTableSetupState, TrackingTableSetupStatus> {\n ResourceSetupInfo {\n key: (),\n state: self.desired_state.clone(),\n description: \"Tracking Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for TrackingTableSetupStatus {\n ", "suffix_code": "\n\n fn change_type(&self) -> SetupChangeType {\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(_)) => SetupChangeType::Create,\n (Some(min_version_id), Some(desired)) => {\n if min_version_id == desired.version_id && self.legacy_table_names.is_empty() {\n SetupChangeType::NoChange\n } else if min_version_id < desired.version_id {\n SetupChangeType::Update\n } 
else {\n SetupChangeType::Invalid\n }\n }\n (Some(_), None) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n }\n }\n}\n\nimpl TrackingTableSetupStatus {\n pub async fn apply_change(&self) -> Result<()> {\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n if let Some(desired) = &self.desired_state {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\n \"ALTER TABLE IF EXISTS {} RENAME TO {}\",\n lagacy_name, desired.table_name\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n if self.min_existing_version_id != Some(desired.version_id) {\n upgrade_tracking_table(\n pool,\n &desired.table_name,\n self.min_existing_version_id.unwrap_or(0),\n desired.version_id,\n )\n .await?;\n }\n } else {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\"DROP TABLE IF EXISTS {lagacy_name}\");\n sqlx::query(&query).execute(pool).await?;\n }\n return Ok(());\n }\n Ok(())\n }\n}\n", "middle_code": "fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. 
\",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. \",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/cocoindex/src/setup/driver.rs", "use crate::{\n lib_context::{FlowContext, FlowExecutionContext, LibSetupContext},\n ops::{\n get_optional_executor_factory,\n interface::{ExportTargetFactory, FlowInstanceContext},\n },\n prelude::*,\n};\n\nuse sqlx::PgPool;\nuse std::{\n fmt::{Debug, Display},\n str::FromStr,\n};\n\nuse super::{AllSetupStates, GlobalSetupStatus};\nuse super::{\n CombinedState, DesiredMode, ExistingMode, FlowSetupState, FlowSetupStatus, ObjectSetupStatus,\n ObjectStatus, ResourceIdentifier, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType,\n StateChange, TargetSetupState, db_metadata,\n};\nuse crate::execution::db_tracking_setup;\nuse crate::ops::interface::ExecutorFactory;\nuse std::fmt::Write;\n\nenum MetadataRecordType {\n FlowVersion,\n FlowMetadata,\n TrackingTable,\n Target(String),\n}\n\nimpl Display for MetadataRecordType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n MetadataRecordType::FlowVersion => f.write_str(db_metadata::FLOW_VERSION_RESOURCE_TYPE),\n MetadataRecordType::FlowMetadata => write!(f, \"FlowMetadata\"),\n MetadataRecordType::TrackingTable => write!(f, \"TrackingTable\"),\n MetadataRecordType::Target(target_id) => write!(f, \"Target:{target_id}\"),\n }\n }\n}\n\nimpl std::str::FromStr for MetadataRecordType {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n if s == db_metadata::FLOW_VERSION_RESOURCE_TYPE {\n Ok(Self::FlowVersion)\n } else if s == \"FlowMetadata\" {\n Ok(Self::FlowMetadata)\n } else if s == 
\"TrackingTable\" {\n Ok(Self::TrackingTable)\n } else if let Some(target_id) = s.strip_prefix(\"Target:\") {\n Ok(Self::Target(target_id.to_string()))\n } else {\n anyhow::bail!(\"Invalid MetadataRecordType string: {}\", s)\n }\n }\n}\n\nfn from_metadata_record(\n state: Option,\n staging_changes: sqlx::types::Json>>,\n legacy_state_key: Option,\n) -> Result> {\n let current: Option = state.map(serde_json::from_value).transpose()?;\n let staging: Vec> = (staging_changes.0.into_iter())\n .map(|sc| -> Result<_> {\n Ok(match sc {\n StateChange::Upsert(v) => StateChange::Upsert(serde_json::from_value(v)?),\n StateChange::Delete => StateChange::Delete,\n })\n })\n .collect::>()?;\n Ok(CombinedState {\n current,\n staging,\n legacy_state_key,\n })\n}\n\nfn get_export_target_factory(\n target_type: &str,\n) -> Option> {\n match get_optional_executor_factory(target_type) {\n Some(ExecutorFactory::ExportTarget(factory)) => Some(factory),\n _ => None,\n }\n}\n\npub async fn get_existing_setup_state(pool: &PgPool) -> Result> {\n let setup_metadata_records = db_metadata::read_setup_metadata(pool).await?;\n\n let setup_metadata_records = if let Some(records) = setup_metadata_records {\n records\n } else {\n return Ok(AllSetupStates::default());\n };\n\n // Group setup metadata records by flow name\n let setup_metadata_records = setup_metadata_records.into_iter().fold(\n BTreeMap::>::new(),\n |mut acc, record| {\n acc.entry(record.flow_name.clone())\n .or_default()\n .push(record);\n acc\n },\n );\n\n let flows = setup_metadata_records\n .into_iter()\n .map(|(flow_name, metadata_records)| -> anyhow::Result<_> {\n let mut flow_ss = FlowSetupState::default();\n for metadata_record in metadata_records {\n let state = metadata_record.state;\n let staging_changes = metadata_record.staging_changes;\n match MetadataRecordType::from_str(&metadata_record.resource_type)? 
{\n MetadataRecordType::FlowVersion => {\n flow_ss.seen_flow_metadata_version =\n db_metadata::parse_flow_version(&state);\n }\n MetadataRecordType::FlowMetadata => {\n flow_ss.metadata = from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::TrackingTable => {\n flow_ss.tracking_table =\n from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::Target(target_type) => {\n let normalized_key = {\n if let Some(factory) = get_export_target_factory(&target_type) {\n factory.normalize_setup_key(&metadata_record.key)?\n } else {\n metadata_record.key.clone()\n }\n };\n let combined_state = from_metadata_record(\n state,\n staging_changes,\n (normalized_key != metadata_record.key).then_some(metadata_record.key),\n )?;\n flow_ss.targets.insert(\n super::ResourceIdentifier {\n key: normalized_key,\n target_kind: target_type,\n },\n combined_state,\n );\n }\n }\n }\n Ok((flow_name, flow_ss))\n })\n .collect::>()?;\n\n Ok(AllSetupStates {\n has_metadata_table: true,\n flows,\n })\n}\n\nfn diff_state(\n existing_state: Option<&E>,\n desired_state: Option<&D>,\n diff: impl Fn(Option<&E>, &D) -> Option>,\n) -> Option>\nwhere\n E: PartialEq,\n{\n match (existing_state, desired_state) {\n (None, None) => None,\n (Some(_), None) => Some(StateChange::Delete),\n (existing_state, Some(desired_state)) => {\n if existing_state.map(|e| e == desired_state).unwrap_or(false) {\n None\n } else {\n diff(existing_state, desired_state)\n }\n }\n }\n}\n\nfn to_object_status(existing: Option, desired: Option) -> Option {\n Some(match (&existing, &desired) {\n (Some(_), None) => ObjectStatus::Deleted,\n (None, Some(_)) => ObjectStatus::New,\n (Some(_), Some(_)) => ObjectStatus::Existing,\n (None, None) => return None,\n })\n}\n\n#[derive(Debug, Default)]\nstruct GroupedResourceStates {\n desired: Option,\n existing: CombinedState,\n}\n\nfn group_resource_states<'a>(\n desired: impl Iterator,\n existing: impl Iterator)>,\n) -> Result> {\n let mut 
grouped: IndexMap<&'a ResourceIdentifier, GroupedResourceStates> = desired\n .into_iter()\n .map(|(key, state)| {\n (\n key,\n GroupedResourceStates {\n desired: Some(state.clone()),\n existing: CombinedState::default(),\n },\n )\n })\n .collect();\n for (key, state) in existing {\n let entry = grouped.entry(key);\n if state.current.is_some() {\n if let indexmap::map::Entry::Occupied(entry) = &entry {\n if entry.get().existing.current.is_some() {\n bail!(\"Duplicate existing state for key: {}\", entry.key());\n }\n }\n }\n let entry = entry.or_default();\n if let Some(current) = &state.current {\n entry.existing.current = Some(current.clone());\n }\n if let Some(legacy_state_key) = &state.legacy_state_key {\n if entry\n .existing\n .legacy_state_key\n .as_ref()\n .is_some_and(|v| v != legacy_state_key)\n {\n warn!(\n \"inconsistent legacy key: {:?}, {:?}\",\n key, entry.existing.legacy_state_key\n );\n }\n entry.existing.legacy_state_key = Some(legacy_state_key.clone());\n }\n for s in state.staging.iter() {\n match s {\n StateChange::Upsert(v) => {\n entry.existing.staging.push(StateChange::Upsert(v.clone()))\n }\n StateChange::Delete => entry.existing.staging.push(StateChange::Delete),\n }\n }\n }\n Ok(grouped)\n}\n\npub async fn check_flow_setup_status(\n desired_state: Option<&FlowSetupState>,\n existing_state: Option<&FlowSetupState>,\n flow_instance_ctx: &Arc,\n) -> Result {\n let metadata_change = diff_state(\n existing_state.map(|e| &e.metadata),\n desired_state.map(|d| &d.metadata),\n |_, desired_state| Some(StateChange::Upsert(desired_state.clone())),\n );\n\n let new_source_ids = desired_state\n .iter()\n .flat_map(|d| d.metadata.sources.values().map(|v| v.source_id))\n .collect::>();\n let tracking_table_change = db_tracking_setup::TrackingTableSetupStatus::new(\n desired_state.map(|d| &d.tracking_table),\n &existing_state\n .map(|e| Cow::Borrowed(&e.tracking_table))\n .unwrap_or_default(),\n (existing_state.iter())\n .flat_map(|state| 
state.metadata.possible_versions())\n .flat_map(|metadata| {\n metadata\n .sources\n .values()\n .map(|v| v.source_id)\n .filter(|id| !new_source_ids.contains(id))\n })\n .collect::>()\n .into_iter()\n .collect(),\n );\n\n let mut target_resources = Vec::new();\n let mut unknown_resources = Vec::new();\n\n let grouped_target_resources = group_resource_states(\n desired_state.iter().flat_map(|d| d.targets.iter()),\n existing_state.iter().flat_map(|e| e.targets.iter()),\n )?;\n for (resource_id, v) in grouped_target_resources.into_iter() {\n let factory = match get_export_target_factory(&resource_id.target_kind) {\n Some(factory) => factory,\n None => {\n unknown_resources.push(resource_id.clone());\n continue;\n }\n };\n let state = v.desired.clone();\n let target_state = v\n .desired\n .and_then(|state| (!state.common.setup_by_user).then_some(state.state));\n let existing_without_setup_by_user = CombinedState {\n current: v\n .existing\n .current\n .and_then(|s| s.state_unless_setup_by_user()),\n staging: v\n .existing\n .staging\n .into_iter()\n .filter_map(|s| match s {\n StateChange::Upsert(s) => {\n s.state_unless_setup_by_user().map(StateChange::Upsert)\n }\n StateChange::Delete => Some(StateChange::Delete),\n })\n .collect(),\n legacy_state_key: v.existing.legacy_state_key.clone(),\n };\n let never_setup_by_sys = target_state.is_none()\n && existing_without_setup_by_user.current.is_none()\n && existing_without_setup_by_user.staging.is_empty();\n let setup_status = if never_setup_by_sys {\n None\n } else {\n Some(\n factory\n .check_setup_status(\n &resource_id.key,\n target_state,\n existing_without_setup_by_user,\n flow_instance_ctx.clone(),\n )\n .await?,\n )\n };\n target_resources.push(ResourceSetupInfo {\n key: resource_id.clone(),\n state,\n description: factory.describe_resource(&resource_id.key)?,\n setup_status,\n legacy_key: v\n .existing\n .legacy_state_key\n .map(|legacy_state_key| ResourceIdentifier {\n target_kind: 
resource_id.target_kind.clone(),\n key: legacy_state_key,\n }),\n });\n }\n Ok(FlowSetupStatus {\n status: to_object_status(existing_state, desired_state),\n seen_flow_metadata_version: existing_state.and_then(|s| s.seen_flow_metadata_version),\n metadata_change,\n tracking_table: tracking_table_change.map(|c| c.into_setup_info()),\n target_resources,\n unknown_resources,\n })\n}\n\nstruct ResourceSetupChangeItem<'a, K: 'a, C: ResourceSetupStatus> {\n key: &'a K,\n setup_status: &'a C,\n}\n\nasync fn maybe_update_resource_setup<\n 'a,\n K: 'a,\n S: 'a,\n C: ResourceSetupStatus,\n ChangeApplierResultFut: Future>,\n>(\n resource_kind: &str,\n write: &mut (dyn std::io::Write + Send),\n resources: impl Iterator>,\n apply_change: impl FnOnce(Vec>) -> ChangeApplierResultFut,\n) -> Result<()> {\n let mut changes = Vec::new();\n for resource in resources {\n if let Some(setup_status) = &resource.setup_status {\n if setup_status.change_type() != SetupChangeType::NoChange {\n changes.push(ResourceSetupChangeItem {\n key: &resource.key,\n setup_status,\n });\n writeln!(write, \"{}:\", resource.description)?;\n for change in setup_status.describe_changes() {\n match change {\n setup::ChangeDescription::Action(action) => {\n writeln!(write, \" - {action}\")?;\n }\n setup::ChangeDescription::Note(_) => {}\n }\n }\n }\n }\n }\n if !changes.is_empty() {\n write!(write, \"Pushing change for {resource_kind}...\")?;\n apply_change(changes).await?;\n writeln!(write, \"DONE\")?;\n }\n Ok(())\n}\n\nasync fn apply_changes_for_flow(\n write: &mut (dyn std::io::Write + Send),\n flow_ctx: &FlowContext,\n flow_status: &FlowSetupStatus,\n existing_setup_state: &mut Option>,\n pool: &PgPool,\n) -> Result<()> {\n let Some(status) = flow_status.status else {\n return Ok(());\n };\n let verb = match status {\n ObjectStatus::New => \"Creating\",\n ObjectStatus::Deleted => \"Deleting\",\n ObjectStatus::Existing => \"Updating resources for \",\n _ => bail!(\"invalid flow status\"),\n };\n 
write!(write, \"\\n{verb} flow {}:\\n\", flow_ctx.flow_name())?;\n\n let mut update_info =\n HashMap::::new();\n\n if let Some(metadata_change) = &flow_status.metadata_change {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::FlowMetadata.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(metadata_change.desired_state(), None)?,\n );\n }\n if let Some(tracking_table) = &flow_status.tracking_table {\n if tracking_table\n .setup_status\n .as_ref()\n .map(|c| c.change_type() != SetupChangeType::NoChange)\n .unwrap_or_default()\n {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::TrackingTable.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(tracking_table.state.as_ref(), None)?,\n );\n }\n }\n\n for target_resource in &flow_status.target_resources {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(target_resource.key.target_kind.clone()).to_string(),\n target_resource.key.key.clone(),\n ),\n db_metadata::StateUpdateInfo::new(\n target_resource.state.as_ref(),\n target_resource.legacy_key.as_ref().map(|k| {\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(k.target_kind.clone()).to_string(),\n k.key.clone(),\n )\n }),\n )?,\n );\n }\n\n let new_version_id = db_metadata::stage_changes_for_flow(\n flow_ctx.flow_name(),\n flow_status.seen_flow_metadata_version,\n &update_info,\n pool,\n )\n .await?;\n\n if let Some(tracking_table) = &flow_status.tracking_table {\n maybe_update_resource_setup(\n \"tracking table\",\n write,\n std::iter::once(tracking_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n }\n\n let mut setup_status_by_target_kind = IndexMap::<&str, Vec<_>>::new();\n for target_resource in &flow_status.target_resources {\n setup_status_by_target_kind\n .entry(target_resource.key.target_kind.as_str())\n .or_default()\n .push(target_resource);\n }\n 
for (target_kind, resources) in setup_status_by_target_kind.into_iter() {\n maybe_update_resource_setup(\n target_kind,\n write,\n resources.into_iter(),\n |setup_status| async move {\n let factory = get_export_target_factory(target_kind).ok_or_else(|| {\n anyhow::anyhow!(\"No factory found for target kind: {}\", target_kind)\n })?;\n factory\n .apply_setup_changes(\n setup_status\n .into_iter()\n .map(|s| interface::ResourceSetupChangeItem {\n key: &s.key.key,\n setup_status: s.setup_status.as_ref(),\n })\n .collect(),\n flow_ctx.flow.flow_instance_ctx.clone(),\n )\n .await?;\n Ok(())\n },\n )\n .await?;\n }\n\n let is_deletion = status == ObjectStatus::Deleted;\n db_metadata::commit_changes_for_flow(\n flow_ctx.flow_name(),\n new_version_id,\n &update_info,\n is_deletion,\n pool,\n )\n .await?;\n if is_deletion {\n *existing_setup_state = None;\n } else {\n let (existing_metadata, existing_tracking_table, existing_targets) =\n match std::mem::take(existing_setup_state) {\n Some(s) => (Some(s.metadata), Some(s.tracking_table), s.targets),\n None => Default::default(),\n };\n let metadata = CombinedState::from_change(\n existing_metadata,\n flow_status\n .metadata_change\n .as_ref()\n .map(|v| v.desired_state()),\n );\n let tracking_table = CombinedState::from_change(\n existing_tracking_table,\n flow_status.tracking_table.as_ref().map(|c| {\n c.setup_status\n .as_ref()\n .and_then(|c| c.desired_state.as_ref())\n }),\n );\n let mut targets = existing_targets;\n for target_resource in &flow_status.target_resources {\n match &target_resource.state {\n Some(state) => {\n targets.insert(\n target_resource.key.clone(),\n CombinedState::from_desired(state.clone()),\n );\n }\n None => {\n targets.shift_remove(&target_resource.key);\n }\n }\n }\n *existing_setup_state = Some(setup::FlowSetupState {\n metadata,\n tracking_table,\n seen_flow_metadata_version: Some(new_version_id),\n targets,\n });\n }\n\n writeln!(write, \"Done for flow {}\", flow_ctx.flow_name())?;\n 
Ok(())\n}\n\nasync fn apply_global_changes(\n write: &mut (dyn std::io::Write + Send),\n setup_status: &GlobalSetupStatus,\n all_setup_states: &mut AllSetupStates,\n) -> Result<()> {\n maybe_update_resource_setup(\n \"metadata table\",\n write,\n std::iter::once(&setup_status.metadata_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n\n if setup_status\n .metadata_table\n .setup_status\n .as_ref()\n .is_some_and(|c| c.change_type() == SetupChangeType::Create)\n {\n all_setup_states.has_metadata_table = true;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum FlowSetupChangeAction {\n Setup,\n Drop,\n}\npub struct SetupChangeBundle {\n pub action: FlowSetupChangeAction,\n pub flow_names: Vec,\n}\n\nimpl SetupChangeBundle {\n async fn get_flow_setup_status<'a>(\n setup_ctx: &LibSetupContext,\n flow_ctx: &'a FlowContext,\n flow_exec_ctx: &'a FlowExecutionContext,\n action: &FlowSetupChangeAction,\n buffer: &'a mut Option,\n ) -> Result<&'a FlowSetupStatus> {\n let result = match action {\n FlowSetupChangeAction::Setup => &flow_exec_ctx.setup_status,\n FlowSetupChangeAction::Drop => {\n let existing_state = setup_ctx.all_setup_states.flows.get(flow_ctx.flow_name());\n buffer.insert(\n check_flow_setup_status(None, existing_state, &flow_ctx.flow.flow_instance_ctx)\n .await?,\n )\n }\n };\n Ok(result)\n }\n\n pub async fn describe(&self, lib_context: &LibContext) -> Result<(String, bool)> {\n let mut text = String::new();\n let mut is_up_to_date = true;\n\n let setup_ctx = lib_context\n .require_persistence_ctx()?\n .setup_ctx\n .read()\n .await;\n let setup_ctx = &*setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup {\n is_up_to_date = is_up_to_date && setup_ctx.global_setup_status.is_up_to_date();\n write!(&mut text, \"{}\", setup_ctx.global_setup_status)?;\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n 
.ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().read().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n\n is_up_to_date = is_up_to_date && setup_status.is_up_to_date();\n write!(\n &mut text,\n \"{}\",\n setup::FormattedFlowSetupStatus(flow_name, setup_status)\n )?;\n }\n Ok((text, is_up_to_date))\n }\n\n pub async fn apply(\n &self,\n lib_context: &LibContext,\n write: &mut (dyn std::io::Write + Send),\n ) -> Result<()> {\n let persistence_ctx = lib_context.require_persistence_ctx()?;\n let mut setup_ctx = persistence_ctx.setup_ctx.write().await;\n let setup_ctx = &mut *setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup\n && !setup_ctx.global_setup_status.is_up_to_date()\n {\n apply_global_changes(\n write,\n &setup_ctx.global_setup_status,\n &mut setup_ctx.all_setup_states,\n )\n .await?;\n setup_ctx.global_setup_status =\n GlobalSetupStatus::from_setup_states(&setup_ctx.all_setup_states);\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let mut flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().write().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n if setup_status.is_up_to_date() {\n continue;\n }\n\n let mut flow_states = setup_ctx.all_setup_states.flows.remove(flow_name);\n apply_changes_for_flow(\n write,\n &flow_ctx,\n setup_status,\n &mut flow_states,\n &persistence_ctx.builtin_db_pool,\n )\n .await?;\n\n flow_exec_ctx\n .update_setup_state(&flow_ctx.flow, 
flow_states.as_ref())\n .await?;\n if let Some(flow_states) = flow_states {\n setup_ctx\n .all_setup_states\n .flows\n .insert(flow_name.to_string(), flow_states);\n }\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/db_metadata.rs", "use crate::prelude::*;\n\nuse super::{ResourceSetupInfo, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::utils::db::WriteAction;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\n\nconst SETUP_METADATA_TABLE_NAME: &str = \"cocoindex_setup_metadata\";\npub const FLOW_VERSION_RESOURCE_TYPE: &str = \"__FlowVersion\";\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SetupMetadataRecord {\n pub flow_name: String,\n // e.g. \"Flow\", \"SourceTracking\", \"Target:{TargetType}\"\n pub resource_type: String,\n pub key: serde_json::Value,\n pub state: Option,\n pub staging_changes: sqlx::types::Json>>,\n}\n\npub fn parse_flow_version(state: &Option) -> Option {\n match state {\n Some(serde_json::Value::Number(n)) => n.as_u64(),\n _ => None,\n }\n}\n\n/// Returns None if metadata table doesn't exist.\npub async fn read_setup_metadata(pool: &PgPool) -> Result>> {\n let mut db_conn = pool.acquire().await?;\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME}\",\n );\n let metadata = sqlx::query_as(&query_str).fetch_all(&mut *db_conn).await;\n let result = match metadata {\n Ok(metadata) => Some(metadata),\n Err(err) => {\n let exists: Option = sqlx::query_scalar(\n \"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = $1)\",\n )\n .bind(SETUP_METADATA_TABLE_NAME)\n .fetch_one(&mut *db_conn)\n .await?;\n if !exists.unwrap_or(false) {\n None\n } else {\n return Err(err.into());\n }\n }\n };\n Ok(result)\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ResourceTypeKey {\n pub resource_type: String,\n pub key: serde_json::Value,\n}\n\nimpl ResourceTypeKey {\n pub fn new(resource_type: String, key: serde_json::Value) -> Self 
{\n Self { resource_type, key }\n }\n}\n\nstatic VERSION_RESOURCE_TYPE_ID: LazyLock = LazyLock::new(|| ResourceTypeKey {\n resource_type: FLOW_VERSION_RESOURCE_TYPE.to_string(),\n key: serde_json::Value::Null,\n});\n\nasync fn read_metadata_records_for_flow(\n flow_name: &str,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1\",\n );\n let metadata: Vec = sqlx::query_as(&query_str)\n .bind(flow_name)\n .fetch_all(db_executor)\n .await?;\n let result = metadata\n .into_iter()\n .map(|m| {\n (\n ResourceTypeKey {\n resource_type: m.resource_type.clone(),\n key: m.key.clone(),\n },\n m,\n )\n })\n .collect();\n Ok(result)\n}\n\nasync fn read_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT state FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n let state: Option = sqlx::query_scalar(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .fetch_optional(db_executor)\n .await?;\n Ok(state)\n}\n\nasync fn upsert_staging_changes(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n staging_changes: Vec>,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, staging_changes) VALUES ($1, $2, $3, $4)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET staging_changes = $4 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n 
.bind(sqlx::types::Json(staging_changes))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn upsert_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n state: &serde_json::Value,\n action: WriteAction,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, state, staging_changes) VALUES ($1, $2, $3, $4, $5)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET state = $4, staging_changes = $5 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(state))\n .bind(sqlx::types::Json(Vec::::new()))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn delete_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub struct StateUpdateInfo {\n pub desired_state: Option,\n pub legacy_key: Option,\n}\n\nimpl StateUpdateInfo {\n pub fn new(\n desired_state: Option<&impl Serialize>,\n legacy_key: Option,\n ) -> Result {\n Ok(Self {\n desired_state: desired_state\n .as_ref()\n .map(serde_json::to_value)\n .transpose()?,\n legacy_key,\n })\n }\n}\n\npub async fn stage_changes_for_flow(\n flow_name: &str,\n seen_metadata_version: Option,\n resource_update_info: &HashMap,\n pool: &PgPool,\n) -> Result {\n let mut txn = pool.begin().await?;\n let mut existing_records = read_metadata_records_for_flow(flow_name, &mut *txn).await?;\n let latest_metadata_version 
= existing_records\n .get(&VERSION_RESOURCE_TYPE_ID)\n .and_then(|m| parse_flow_version(&m.state));\n if seen_metadata_version < latest_metadata_version {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n let new_metadata_version = seen_metadata_version.unwrap_or_default() + 1;\n upsert_state(\n flow_name,\n &VERSION_RESOURCE_TYPE_ID,\n &serde_json::Value::Number(new_metadata_version.into()),\n if latest_metadata_version.is_some() {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n &mut *txn,\n )\n .await?;\n\n for (type_id, update_info) in resource_update_info {\n let existing = existing_records.remove(type_id);\n let change = match &update_info.desired_state {\n Some(desired_state) => StateChange::Upsert(desired_state.clone()),\n None => StateChange::Delete,\n };\n let mut new_staging_changes = vec![];\n if let Some(legacy_key) = &update_info.legacy_key {\n if let Some(legacy_record) = existing_records.remove(legacy_key) {\n new_staging_changes.extend(legacy_record.staging_changes.0);\n delete_state(flow_name, legacy_key, &mut *txn).await?;\n }\n }\n let (action, existing_staging_changes) = match existing {\n Some(existing) => {\n let existing_staging_changes = existing.staging_changes.0;\n if existing_staging_changes.iter().all(|c| c != &change) {\n new_staging_changes.push(change);\n }\n (WriteAction::Update, existing_staging_changes)\n }\n None => {\n if update_info.desired_state.is_some() {\n new_staging_changes.push(change);\n }\n (WriteAction::Insert, vec![])\n }\n };\n if !new_staging_changes.is_empty() {\n upsert_staging_changes(\n flow_name,\n type_id,\n [existing_staging_changes, new_staging_changes].concat(),\n &mut *txn,\n action,\n )\n .await?;\n }\n }\n txn.commit().await?;\n Ok(new_metadata_version)\n}\n\npub async fn commit_changes_for_flow(\n flow_name: &str,\n curr_metadata_version: u64,\n state_updates: &HashMap,\n delete_version: bool,\n pool: &PgPool,\n) -> Result<()> 
{\n let mut txn = pool.begin().await?;\n let latest_metadata_version =\n parse_flow_version(&read_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?);\n if latest_metadata_version != Some(curr_metadata_version) {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n for (type_id, update_info) in state_updates.iter() {\n match &update_info.desired_state {\n Some(desired_state) => {\n upsert_state(\n flow_name,\n type_id,\n desired_state,\n WriteAction::Update,\n &mut *txn,\n )\n .await?;\n }\n None => {\n delete_state(flow_name, type_id, &mut *txn).await?;\n }\n }\n }\n if delete_version {\n delete_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?;\n }\n txn.commit().await?;\n Ok(())\n}\n\n#[derive(Debug)]\npub struct MetadataTableSetup {\n pub metadata_table_missing: bool,\n}\n\nimpl MetadataTableSetup {\n pub fn into_setup_info(self) -> ResourceSetupInfo<(), (), MetadataTableSetup> {\n ResourceSetupInfo {\n key: (),\n state: None,\n description: \"CocoIndex Metadata Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for MetadataTableSetup {\n fn describe_changes(&self) -> Vec {\n if self.metadata_table_missing {\n vec![setup::ChangeDescription::Action(format!(\n \"Create the cocoindex metadata table {SETUP_METADATA_TABLE_NAME}\"\n ))]\n } else {\n vec![]\n }\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.metadata_table_missing {\n SetupChangeType::Create\n } else {\n SetupChangeType::NoChange\n }\n }\n}\n\nimpl MetadataTableSetup {\n pub async fn apply_change(&self) -> Result<()> {\n if !self.metadata_table_missing {\n return Ok(());\n }\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n let query_str = format!(\n \"CREATE TABLE IF NOT EXISTS {SETUP_METADATA_TABLE_NAME} (\n flow_name TEXT NOT NULL,\n resource_type TEXT NOT NULL,\n key JSONB NOT NULL,\n state JSONB,\n 
staging_changes JSONB NOT NULL,\n\n PRIMARY KEY (flow_name, resource_type, key)\n )\n \",\n );\n sqlx::query(&query_str).execute(pool).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/postgres.rs", "use crate::prelude::*;\n\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::base::spec::{self, *};\nuse crate::ops::sdk::*;\nuse crate::settings::DatabaseConnectionSpec;\nuse async_trait::async_trait;\nuse indexmap::{IndexMap, IndexSet};\nuse itertools::Itertools;\nuse serde::Serialize;\nuse sqlx::PgPool;\nuse sqlx::postgres::types::PgRange;\nuse std::ops::Bound;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n database: Option>,\n table_name: Option,\n}\nconst BIND_LIMIT: usize = 65535;\n\nfn key_value_fields_iter<'a>(\n key_fields_schema: &[FieldSchema],\n key_value: &'a KeyValue,\n) -> Result<&'a [KeyValue]> {\n let slice = if key_fields_schema.len() == 1 {\n std::slice::from_ref(key_value)\n } else {\n match key_value {\n KeyValue::Struct(fields) => fields,\n _ => bail!(\"expect struct key value\"),\n }\n };\n Ok(slice)\n}\n\nfn convertible_to_pgvector(vec_schema: &VectorTypeSchema) -> bool {\n if vec_schema.dimension.is_some() {\n matches!(\n *vec_schema.element_type,\n BasicValueType::Float32 | BasicValueType::Float64 | BasicValueType::Int64\n )\n } else {\n false\n }\n}\n\nfn bind_key_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n key_value: &'arg KeyValue,\n) -> Result<()> {\n match key_value {\n KeyValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Str(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Bool(v) => {\n builder.push_bind(v);\n }\n KeyValue::Int64(v) => {\n builder.push_bind(v);\n }\n KeyValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n KeyValue::Uuid(v) => {\n builder.push_bind(v);\n }\n KeyValue::Date(v) => 
{\n builder.push_bind(v);\n }\n KeyValue::Struct(fields) => {\n builder.push_bind(sqlx::types::Json(fields));\n }\n }\n Ok(())\n}\n\nfn bind_value_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n field_schema: &'arg FieldSchema,\n value: &'arg Value,\n) -> Result<()> {\n match &value {\n Value::Basic(v) => match v {\n BasicValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Str(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Bool(v) => {\n builder.push_bind(v);\n }\n BasicValue::Int64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float32(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n BasicValue::Uuid(v) => {\n builder.push_bind(v);\n }\n BasicValue::Date(v) => {\n builder.push_bind(v);\n }\n BasicValue::Time(v) => {\n builder.push_bind(v);\n }\n BasicValue::LocalDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::OffsetDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::TimeDelta(v) => {\n builder.push_bind(v);\n }\n BasicValue::Json(v) => {\n builder.push_bind(sqlx::types::Json(&**v));\n }\n BasicValue::Vector(v) => match &field_schema.value_type.typ {\n ValueType::Basic(BasicValueType::Vector(vs)) if convertible_to_pgvector(vs) => {\n let vec = v\n .iter()\n .map(|v| {\n Ok(match v {\n BasicValue::Float32(v) => *v,\n BasicValue::Float64(v) => *v as f32,\n BasicValue::Int64(v) => *v as f32,\n v => bail!(\"unexpected vector element type: {}\", v.kind()),\n })\n })\n .collect::>>()?;\n builder.push_bind(pgvector::Vector::from(vec));\n }\n _ => {\n builder.push_bind(sqlx::types::Json(v));\n }\n },\n BasicValue::UnionVariant { .. 
} => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v: value,\n }));\n }\n },\n Value::Null => {\n builder.push(\"NULL\");\n }\n v => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v,\n }));\n }\n };\n Ok(())\n}\n\npub struct ExportContext {\n db_ref: Option>,\n db_pool: PgPool,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n upsert_sql_prefix: String,\n upsert_sql_suffix: String,\n delete_sql_prefix: String,\n}\n\nimpl ExportContext {\n fn new(\n db_ref: Option>,\n db_pool: PgPool,\n table_name: String,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n ) -> Result {\n let key_fields = key_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let all_fields = (key_fields_schema.iter().chain(value_fields_schema.iter()))\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let set_value_fields = value_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\" = EXCLUDED.\\\"{}\\\"\", f.name, f.name))\n .collect::>()\n .join(\", \");\n\n Ok(Self {\n db_ref,\n db_pool,\n upsert_sql_prefix: format!(\"INSERT INTO {table_name} ({all_fields}) VALUES \"),\n upsert_sql_suffix: if value_fields_schema.is_empty() {\n format!(\" ON CONFLICT ({key_fields}) DO NOTHING;\")\n } else {\n format!(\" ON CONFLICT ({key_fields}) DO UPDATE SET {set_value_fields};\")\n },\n delete_sql_prefix: format!(\"DELETE FROM {table_name} WHERE \"),\n key_fields_schema,\n value_fields_schema,\n })\n }\n}\n\nimpl ExportContext {\n async fn upsert(\n &self,\n upserts: &[interface::ExportTargetUpsertEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n let num_parameters = self.key_fields_schema.len() + self.value_fields_schema.len();\n for upsert_chunk in upserts.chunks(BIND_LIMIT / num_parameters) {\n let mut query_builder = sqlx::QueryBuilder::new(&self.upsert_sql_prefix);\n for (i, upsert) in 
upsert_chunk.iter().enumerate() {\n if i > 0 {\n query_builder.push(\",\");\n }\n query_builder.push(\" (\");\n for (j, key_value) in key_value_fields_iter(&self.key_fields_schema, &upsert.key)?\n .iter()\n .enumerate()\n {\n if j > 0 {\n query_builder.push(\", \");\n }\n bind_key_field(&mut query_builder, key_value)?;\n }\n if self.value_fields_schema.len() != upsert.value.fields.len() {\n bail!(\n \"unmatched value length: {} vs {}\",\n self.value_fields_schema.len(),\n upsert.value.fields.len()\n );\n }\n for (schema, value) in self\n .value_fields_schema\n .iter()\n .zip(upsert.value.fields.iter())\n {\n query_builder.push(\", \");\n bind_value_field(&mut query_builder, schema, value)?;\n }\n query_builder.push(\")\");\n }\n query_builder.push(&self.upsert_sql_suffix);\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n\n async fn delete(\n &self,\n deletions: &[interface::ExportTargetDeleteEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n // TODO: Find a way to batch delete.\n for deletion in deletions.iter() {\n let mut query_builder = sqlx::QueryBuilder::new(\"\");\n query_builder.push(&self.delete_sql_prefix);\n for (i, (schema, value)) in self\n .key_fields_schema\n .iter()\n .zip(key_value_fields_iter(&self.key_fields_schema, &deletion.key)?.iter())\n .enumerate()\n {\n if i > 0 {\n query_builder.push(\" AND \");\n }\n query_builder.push(\"\\\"\");\n query_builder.push(schema.name.as_str());\n query_builder.push(\"\\\"\");\n query_builder.push(\"=\");\n bind_key_field(&mut query_builder, value)?;\n }\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n}\n\n#[derive(Default)]\npub struct Factory {}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TableId {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n database: Option>,\n table_name: String,\n}\n\nimpl std::fmt::Display for TableId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n 
write!(f, \"{}\", self.table_name)?;\n if let Some(database) = &self.database {\n write!(f, \" (database: {database})\")?;\n }\n Ok(())\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SetupState {\n #[serde(flatten)]\n columns: TableColumnsSchema,\n\n vector_indexes: BTreeMap,\n}\n\nimpl SetupState {\n fn new(\n table_id: &TableId,\n key_fields_schema: &[FieldSchema],\n value_fields_schema: &[FieldSchema],\n index_options: &IndexOptions,\n ) -> Self {\n Self {\n columns: TableColumnsSchema {\n key_columns: key_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n value_columns: value_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n },\n vector_indexes: index_options\n .vector_indexes\n .iter()\n .map(|v| (to_vector_index_name(&table_id.table_name, v), v.clone()))\n .collect(),\n }\n }\n\n fn uses_pgvector(&self) -> bool {\n self.columns\n .value_columns\n .iter()\n .any(|(_, value)| match &value {\n ValueType::Basic(BasicValueType::Vector(vec_schema)) => {\n convertible_to_pgvector(vec_schema)\n }\n _ => false,\n })\n }\n}\n\nfn to_column_type_sql(column_type: &ValueType) -> String {\n match column_type {\n ValueType::Basic(basic_type) => match basic_type {\n BasicValueType::Bytes => \"bytea\".into(),\n BasicValueType::Str => \"text\".into(),\n BasicValueType::Bool => \"boolean\".into(),\n BasicValueType::Int64 => \"bigint\".into(),\n BasicValueType::Float32 => \"real\".into(),\n BasicValueType::Float64 => \"double precision\".into(),\n BasicValueType::Range => \"int8range\".into(),\n BasicValueType::Uuid => \"uuid\".into(),\n BasicValueType::Date => \"date\".into(),\n BasicValueType::Time => \"time\".into(),\n BasicValueType::LocalDateTime => \"timestamp\".into(),\n BasicValueType::OffsetDateTime => \"timestamp with time zone\".into(),\n BasicValueType::TimeDelta => \"interval\".into(),\n BasicValueType::Json => \"jsonb\".into(),\n 
BasicValueType::Vector(vec_schema) => {\n if convertible_to_pgvector(vec_schema) {\n format!(\"vector({})\", vec_schema.dimension.unwrap_or(0))\n } else {\n \"jsonb\".into()\n }\n }\n BasicValueType::Union(_) => \"jsonb\".into(),\n },\n _ => \"jsonb\".into(),\n }\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Owned(TableColumnsSchema {\n key_columns: val\n .columns\n .key_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n value_columns: val\n .columns\n .value_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n })\n }\n}\n\n#[derive(Debug)]\npub struct TableSetupAction {\n table_action: TableMainSetupAction,\n indexes_to_delete: IndexSet,\n indexes_to_create: IndexMap,\n}\n\n#[derive(Debug)]\npub struct SetupStatus {\n create_pgvector_extension: bool,\n actions: TableSetupAction,\n vector_as_jsonb_columns: Vec<(String, ValueType)>,\n}\n\nimpl SetupStatus {\n fn new(desired_state: Option, existing: setup::CombinedState) -> Self {\n let table_action =\n TableMainSetupAction::from_states(desired_state.as_ref(), &existing, false);\n let vector_as_jsonb_columns = desired_state\n .as_ref()\n .iter()\n .flat_map(|s| {\n s.columns.value_columns.iter().filter_map(|(name, schema)| {\n if let ValueType::Basic(BasicValueType::Vector(vec_schema)) = schema\n && !convertible_to_pgvector(vec_schema)\n {\n let is_touched = match &table_action.table_upsertion {\n Some(TableUpsertionAction::Create { values, .. 
}) => {\n values.contains_key(name)\n }\n Some(TableUpsertionAction::Update {\n columns_to_upsert, ..\n }) => columns_to_upsert.contains_key(name),\n None => false,\n };\n if is_touched {\n Some((name.clone(), schema.clone()))\n } else {\n None\n }\n } else {\n None\n }\n })\n })\n .collect::>();\n let (indexes_to_delete, indexes_to_create) = desired_state\n .as_ref()\n .map(|desired| {\n (\n existing\n .possible_versions()\n .flat_map(|v| v.vector_indexes.keys())\n .filter(|index_name| !desired.vector_indexes.contains_key(*index_name))\n .cloned()\n .collect::>(),\n desired\n .vector_indexes\n .iter()\n .filter(|(name, def)| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vector_indexes.get(*name) != Some(def))\n })\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect::>(),\n )\n })\n .unwrap_or_default();\n let create_pgvector_extension = desired_state\n .as_ref()\n .map(|s| s.uses_pgvector())\n .unwrap_or(false)\n && !existing.current.map(|s| s.uses_pgvector()).unwrap_or(false);\n\n Self {\n create_pgvector_extension,\n actions: TableSetupAction {\n table_action,\n indexes_to_delete,\n indexes_to_create,\n },\n vector_as_jsonb_columns,\n }\n }\n}\n\nfn to_vector_similarity_metric_sql(metric: VectorSimilarityMetric) -> &'static str {\n match metric {\n VectorSimilarityMetric::CosineSimilarity => \"vector_cosine_ops\",\n VectorSimilarityMetric::L2Distance => \"vector_l2_ops\",\n VectorSimilarityMetric::InnerProduct => \"vector_ip_ops\",\n }\n}\n\nfn to_index_spec_sql(index_spec: &VectorIndexDef) -> Cow<'static, str> {\n format!(\n \"USING hnsw ({} {})\",\n index_spec.field_name,\n to_vector_similarity_metric_sql(index_spec.metric)\n )\n .into()\n}\n\nfn to_vector_index_name(table_name: &str, vector_index_def: &spec::VectorIndexDef) -> String {\n format!(\n \"{}__{}__{}\",\n table_name,\n vector_index_def.field_name,\n to_vector_similarity_metric_sql(vector_index_def.metric)\n )\n}\n\nfn describe_index_spec(index_name: &str, 
index_spec: &VectorIndexDef) -> String {\n format!(\"{} {}\", index_name, to_index_spec_sql(index_spec))\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut descriptions = self.actions.table_action.describe_changes();\n for (column_name, schema) in self.vector_as_jsonb_columns.iter() {\n descriptions.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by pgvector. It will be stored as `jsonb`.\",\n column_name,\n schema\n )));\n }\n if self.create_pgvector_extension {\n descriptions.push(setup::ChangeDescription::Action(\n \"Create pg_vector extension (if not exists)\".to_string(),\n ));\n }\n if !self.actions.indexes_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete indexes from table: {}\",\n self.actions.indexes_to_delete.iter().join(\", \"),\n )));\n }\n if !self.actions.indexes_to_create.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create indexes in table: {}\",\n self.actions\n .indexes_to_create\n .iter()\n .map(|(index_name, index_spec)| describe_index_spec(index_name, index_spec))\n .join(\", \"),\n )));\n }\n descriptions\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n let has_other_update = !self.actions.indexes_to_create.is_empty()\n || !self.actions.indexes_to_delete.is_empty();\n self.actions.table_action.change_type(has_other_update)\n }\n}\n\nimpl SetupStatus {\n async fn apply_change(&self, db_pool: &PgPool, table_name: &str) -> Result<()> {\n if self.actions.table_action.drop_existing {\n sqlx::query(&format!(\"DROP TABLE IF EXISTS {table_name}\"))\n .execute(db_pool)\n .await?;\n }\n if self.create_pgvector_extension {\n sqlx::query(\"CREATE EXTENSION IF NOT EXISTS vector;\")\n .execute(db_pool)\n .await?;\n }\n for index_name in self.actions.indexes_to_delete.iter() {\n let sql = format!(\"DROP INDEX IF EXISTS {index_name}\");\n 
sqlx::query(&sql).execute(db_pool).await?;\n }\n if let Some(table_upsertion) = &self.actions.table_action.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n let mut fields = (keys\n .iter()\n .map(|(name, typ)| format!(\"\\\"{name}\\\" {typ} NOT NULL\")))\n .chain(values.iter().map(|(name, typ)| format!(\"\\\"{name}\\\" {typ}\")));\n let sql = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} ({}, PRIMARY KEY ({}))\",\n fields.join(\", \"),\n keys.keys().join(\", \")\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n for column_name in columns_to_delete.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\"\",\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n for (column_name, column_type) in columns_to_upsert.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\", ADD COLUMN \\\"{column_name}\\\" {column_type}\"\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n }\n }\n }\n for (index_name, index_spec) in self.actions.indexes_to_create.iter() {\n let sql = format!(\n \"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} {}\",\n to_index_spec_sql(index_spec)\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n Ok(())\n }\n}\n\nasync fn get_db_pool(\n db_ref: Option<&spec::AuthEntryReference>,\n auth_registry: &AuthRegistry,\n) -> Result {\n let lib_context = get_lib_context()?;\n let db_conn_spec = db_ref\n .as_ref()\n .map(|db_ref| auth_registry.get(db_ref))\n .transpose()?;\n let db_pool = match db_conn_spec {\n Some(db_conn_spec) => lib_context.db_pools.get_pool(&db_conn_spec).await?,\n None => lib_context.require_builtin_db_pool()?.clone(),\n };\n Ok(db_pool)\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type 
SetupStatus = SetupStatus;\n type Key = TableId;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Postgres\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(TableId, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n let table_id = TableId {\n database: d.spec.database.clone(),\n table_name: d.spec.table_name.unwrap_or_else(|| {\n utils::db::sanitize_identifier(&format!(\n \"{}__{}\",\n context.flow_instance_name, d.name\n ))\n }),\n };\n let setup_state = SetupState::new(\n &table_id,\n &d.key_fields_schema,\n &d.value_fields_schema,\n &d.index_options,\n );\n let table_name = table_id.table_name.clone();\n let db_ref = d.spec.database;\n let auth_registry = context.auth_registry.clone();\n let export_context = Box::pin(async move {\n let db_pool = get_db_pool(db_ref.as_ref(), &auth_registry).await?;\n let export_context = Arc::new(ExportContext::new(\n db_ref,\n db_pool.clone(),\n table_name,\n d.key_fields_schema,\n d.value_fields_schema,\n )?);\n Ok(export_context)\n });\n Ok(TypedExportDataCollectionBuildOutput {\n setup_key: table_id,\n desired_setup_state: setup_state,\n export_context,\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: TableId,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n Ok(SetupStatus::new(desired, existing))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(check_table_compatibility(\n &desired.columns,\n &existing.columns,\n ))\n }\n\n fn describe_resource(&self, key: &TableId) -> Result {\n Ok(format!(\"Postgres table {}\", key.table_name))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mut_groups_by_db_ref = HashMap::new();\n for mutation in mutations.iter() 
{\n mut_groups_by_db_ref\n .entry(mutation.export_context.db_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mut_groups in mut_groups_by_db_ref.values() {\n let db_pool = &mut_groups\n .first()\n .ok_or_else(|| anyhow!(\"empty group\"))?\n .export_context\n .db_pool;\n let mut txn = db_pool.begin().await?;\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .upsert(&mut_group.mutation.upserts, &mut txn)\n .await?;\n }\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .delete(&mut_group.mutation.deletes, &mut txn)\n .await?;\n }\n txn.commit().await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n for change in changes.iter() {\n let db_pool = get_db_pool(change.key.database.as_ref(), &context.auth_registry).await?;\n change\n .setup_status\n .apply_change(&db_pool, &change.key.table_name)\n .await?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/states.rs", "/// Concepts:\n/// - Resource: some setup that needs to be tracked and maintained.\n/// - Setup State: current state of a resource.\n/// - Staging Change: states changes that may not be really applied yet.\n/// - Combined Setup State: Setup State + Staging Change.\n/// - Status Check: information about changes that are being applied / need to be applied.\n///\n/// Resource hierarchy:\n/// - [resource: setup metadata table] /// - Flow\n/// - [resource: metadata]\n/// - [resource: tracking table]\n/// - Target\n/// - [resource: target-specific stuff]\nuse crate::prelude::*;\n\nuse indenter::indented;\nuse owo_colors::{AnsiColors, OwoColorize};\nuse std::any::Any;\nuse std::fmt::Debug;\nuse std::fmt::{Display, Write};\nuse std::hash::Hash;\n\nuse super::db_metadata;\nuse crate::execution::db_tracking_setup::{\n self, TrackingTableSetupState, TrackingTableSetupStatus,\n};\n\nconst INDENT: &str = \" \";\n\npub trait StateMode: Clone + Copy {\n type State: Debug + Clone;\n type 
DefaultState: Debug + Clone + Default;\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct DesiredMode;\nimpl StateMode for DesiredMode {\n type State = T;\n type DefaultState = T;\n}\n\n#[derive(Debug, Clone)]\npub struct CombinedState {\n pub current: Option,\n pub staging: Vec>,\n /// Legacy state keys that no longer identical to the latest serialized form (usually caused by code change).\n /// They will be deleted when the next change is applied.\n pub legacy_state_key: Option,\n}\n\nimpl CombinedState {\n pub fn from_desired(desired: T) -> Self {\n Self {\n current: Some(desired),\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn from_change(prev: Option>, change: Option>) -> Self\n where\n T: Clone,\n {\n Self {\n current: match change {\n Some(Some(state)) => Some(state.clone()),\n Some(None) => None,\n None => prev.and_then(|v| v.current),\n },\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn possible_versions(&self) -> impl Iterator {\n self.current\n .iter()\n .chain(self.staging.iter().flat_map(|s| s.state().into_iter()))\n }\n\n pub fn always_exists(&self) -> bool {\n self.current.is_some() && self.staging.iter().all(|s| !s.is_delete())\n }\n\n pub fn legacy_values &V>(\n &self,\n desired: Option<&T>,\n f: F,\n ) -> BTreeSet<&V> {\n let desired_value = desired.map(&f);\n self.possible_versions()\n .map(f)\n .filter(|v| Some(*v) != desired_value)\n .collect()\n }\n}\n\nimpl Default for CombinedState {\n fn default() -> Self {\n Self {\n current: None,\n staging: vec![],\n legacy_state_key: None,\n }\n }\n}\n\nimpl PartialEq for CombinedState {\n fn eq(&self, other: &T) -> bool {\n self.staging.is_empty() && self.current.as_ref() == Some(other)\n }\n}\n\n#[derive(Clone, Copy)]\npub struct ExistingMode;\nimpl StateMode for ExistingMode {\n type State = CombinedState;\n type DefaultState = CombinedState;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub enum StateChange {\n Upsert(State),\n Delete,\n}\n\nimpl 
StateChange {\n pub fn is_delete(&self) -> bool {\n matches!(self, StateChange::Delete)\n }\n\n pub fn desired_state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n\n pub fn state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct SourceSetupState {\n pub source_id: i32,\n pub key_schema: schema::ValueType,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct ResourceIdentifier {\n pub key: serde_json::Value,\n pub target_kind: String,\n}\n\nimpl Display for ResourceIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}:{}\", self.target_kind, self.key)\n }\n}\n\n/// Common state (i.e. not specific to a target kind) for a target.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupStateCommon {\n pub target_id: i32,\n pub schema_version_id: i32,\n pub max_schema_version_id: i32,\n #[serde(default)]\n pub setup_by_user: bool,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupState {\n pub common: TargetSetupStateCommon,\n\n pub state: serde_json::Value,\n}\n\nimpl TargetSetupState {\n pub fn state_unless_setup_by_user(self) -> Option {\n (!self.common.setup_by_user).then_some(self.state)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]\npub struct FlowSetupMetadata {\n pub last_source_id: i32,\n pub last_target_id: i32,\n pub sources: BTreeMap,\n}\n\n#[derive(Debug, Clone)]\npub struct FlowSetupState {\n // The version number for the flow, last seen in the metadata table.\n pub seen_flow_metadata_version: Option,\n pub metadata: Mode::DefaultState,\n pub tracking_table: Mode::State,\n pub targets: IndexMap>,\n}\n\nimpl Default for FlowSetupState {\n fn default() 
-> Self {\n Self {\n seen_flow_metadata_version: None,\n metadata: Default::default(),\n tracking_table: Default::default(),\n targets: IndexMap::new(),\n }\n }\n}\n\nimpl PartialEq for FlowSetupState {\n fn eq(&self, other: &Self) -> bool {\n self.metadata == other.metadata\n && self.tracking_table == other.tracking_table\n && self.targets == other.targets\n }\n}\n\n#[derive(Debug, Clone)]\npub struct AllSetupStates {\n pub has_metadata_table: bool,\n pub flows: BTreeMap>,\n}\n\nimpl Default for AllSetupStates {\n fn default() -> Self {\n Self {\n has_metadata_table: false,\n flows: BTreeMap::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum SetupChangeType {\n NoChange,\n Create,\n Update,\n Delete,\n Invalid,\n}\n\npub enum ChangeDescription {\n Action(String),\n Note(String),\n}\n\npub trait ResourceSetupStatus: Send + Sync + Debug + Any + 'static {\n fn describe_changes(&self) -> Vec;\n\n fn change_type(&self) -> SetupChangeType;\n}\n\nimpl ResourceSetupStatus for Box {\n fn describe_changes(&self) -> Vec {\n self.as_ref().describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.as_ref().change_type()\n }\n}\n\nimpl ResourceSetupStatus for std::convert::Infallible {\n fn describe_changes(&self) -> Vec {\n unreachable!()\n }\n\n fn change_type(&self) -> SetupChangeType {\n unreachable!()\n }\n}\n\n#[derive(Debug)]\npub struct ResourceSetupInfo {\n pub key: K,\n pub state: Option,\n pub description: String,\n\n /// If `None`, the resource is managed by users.\n pub setup_status: Option,\n\n pub legacy_key: Option,\n}\n\nimpl std::fmt::Display for ResourceSetupInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let status_code = match self.setup_status.as_ref().map(|c| c.change_type()) {\n Some(SetupChangeType::NoChange) => \"READY\",\n Some(SetupChangeType::Create) => \"TO CREATE\",\n Some(SetupChangeType::Update) => \"TO UPDATE\",\n Some(SetupChangeType::Delete) => 
\"TO DELETE\",\n Some(SetupChangeType::Invalid) => \"INVALID\",\n None => \"USER MANAGED\",\n };\n let status_str = format!(\"[ {status_code:^9} ]\");\n let status_full = status_str.color(AnsiColors::Cyan);\n let desc_colored = &self.description;\n writeln!(f, \"{status_full} {desc_colored}\")?;\n if let Some(setup_status) = &self.setup_status {\n let changes = setup_status.describe_changes();\n if !changes.is_empty() {\n let mut f = indented(f).with_str(INDENT);\n writeln!(f, \"\")?;\n for change in changes {\n match change {\n ChangeDescription::Action(action) => {\n writeln!(\n f,\n \"{} {}\",\n \"TODO:\".color(AnsiColors::BrightBlack).bold(),\n action.color(AnsiColors::BrightBlack)\n )?;\n }\n ChangeDescription::Note(note) => {\n writeln!(\n f,\n \"{} {}\",\n \"NOTE:\".color(AnsiColors::Yellow).bold(),\n note.color(AnsiColors::Yellow)\n )?;\n }\n }\n }\n writeln!(f)?;\n }\n }\n Ok(())\n }\n}\n\nimpl ResourceSetupInfo {\n pub fn is_up_to_date(&self) -> bool {\n self.setup_status\n .as_ref()\n .is_none_or(|c| c.change_type() == SetupChangeType::NoChange)\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum ObjectStatus {\n Invalid,\n New,\n Existing,\n Deleted,\n}\n\npub trait ObjectSetupStatus {\n fn status(&self) -> Option;\n fn is_up_to_date(&self) -> bool;\n}\n\n#[derive(Debug)]\npub struct FlowSetupStatus {\n pub status: Option,\n pub seen_flow_metadata_version: Option,\n\n pub metadata_change: Option>,\n\n pub tracking_table:\n Option>,\n pub target_resources:\n Vec>>,\n\n pub unknown_resources: Vec,\n}\n\nimpl ObjectSetupStatus for FlowSetupStatus {\n fn status(&self) -> Option {\n self.status\n }\n\n fn is_up_to_date(&self) -> bool {\n self.metadata_change.is_none()\n && self\n .tracking_table\n .as_ref()\n .is_none_or(|t| t.is_up_to_date())\n && self\n .target_resources\n .iter()\n .all(|target| target.is_up_to_date())\n }\n}\n\n#[derive(Debug)]\npub struct GlobalSetupStatus {\n pub metadata_table: 
ResourceSetupInfo<(), (), db_metadata::MetadataTableSetup>,\n}\n\nimpl GlobalSetupStatus {\n pub fn from_setup_states(setup_states: &AllSetupStates) -> Self {\n Self {\n metadata_table: db_metadata::MetadataTableSetup {\n metadata_table_missing: !setup_states.has_metadata_table,\n }\n .into_setup_info(),\n }\n }\n\n pub fn is_up_to_date(&self) -> bool {\n self.metadata_table.is_up_to_date()\n }\n}\n\npub struct ObjectSetupStatusCode<'a, Status: ObjectSetupStatus>(&'a Status);\nimpl std::fmt::Display for ObjectSetupStatusCode<'_, Status> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let Some(status) = self.0.status() else {\n return Ok(());\n };\n write!(\n f,\n \"[ {:^9} ]\",\n match status {\n ObjectStatus::New => \"TO CREATE\",\n ObjectStatus::Existing =>\n if self.0.is_up_to_date() {\n \"READY\"\n } else {\n \"TO UPDATE\"\n },\n ObjectStatus::Deleted => \"TO DELETE\",\n ObjectStatus::Invalid => \"INVALID\",\n }\n )\n }\n}\n\nimpl std::fmt::Display for GlobalSetupStatus {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n writeln!(f, \"{}\", self.metadata_table)\n }\n}\n\npub struct FormattedFlowSetupStatus<'a>(pub &'a str, pub &'a FlowSetupStatus);\n\nimpl std::fmt::Display for FormattedFlowSetupStatus<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let flow_ssc = self.1;\n if flow_ssc.status.is_none() {\n return Ok(());\n }\n\n writeln!(\n f,\n \"{} Flow: {}\",\n ObjectSetupStatusCode(flow_ssc)\n .to_string()\n .color(AnsiColors::Cyan),\n self.0\n )?;\n\n let mut f = indented(f).with_str(INDENT);\n if let Some(tracking_table) = &flow_ssc.tracking_table {\n write!(f, \"{tracking_table}\")?;\n }\n for target_resource in &flow_ssc.target_resources {\n write!(f, \"{target_resource}\")?;\n }\n for resource in &flow_ssc.unknown_resources {\n writeln!(f, \"[ UNKNOWN ] {resource}\")?;\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/neo4j.rs", "use crate::prelude::*;\n\nuse 
super::shared::property_graph::*;\n\nuse crate::setup::components::{self, State, apply_component_changes};\nuse crate::setup::{ResourceSetupStatus, SetupChangeType};\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nuse indoc::formatdoc;\nuse neo4rs::{BoltType, ConfigBuilder, Graph};\nuse std::fmt::Write;\nuse tokio::sync::OnceCell;\n\nconst DEFAULT_DB: &str = \"neo4j\";\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n uri: String,\n user: String,\n password: String,\n db: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\ntype Neo4jGraphElement = GraphElementType;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct GraphKey {\n uri: String,\n db: String,\n}\n\nimpl GraphKey {\n fn from_spec(spec: &ConnectionSpec) -> Self {\n Self {\n uri: spec.uri.clone(),\n db: spec.db.clone().unwrap_or_else(|| DEFAULT_DB.to_string()),\n }\n }\n}\n\nimpl retryable::IsRetryable for neo4rs::Error {\n fn is_retryable(&self) -> bool {\n match self {\n neo4rs::Error::ConnectionError => true,\n neo4rs::Error::Neo4j(e) => e.kind() == neo4rs::Neo4jErrorKind::Transient,\n _ => false,\n }\n }\n}\n\n#[derive(Default)]\npub struct GraphPool {\n graphs: Mutex>>>>,\n}\n\nimpl GraphPool {\n async fn get_graph(&self, spec: &ConnectionSpec) -> Result> {\n let graph_key = GraphKey::from_spec(spec);\n let cell = {\n let mut graphs = self.graphs.lock().unwrap();\n graphs.entry(graph_key).or_default().clone()\n };\n let graph = cell\n .get_or_try_init(|| async {\n let mut config_builder = ConfigBuilder::default()\n .uri(spec.uri.clone())\n .user(spec.user.clone())\n .password(spec.password.clone());\n if let Some(db) = &spec.db {\n config_builder = config_builder.db(db.clone());\n }\n 
anyhow::Ok(Arc::new(Graph::connect(config_builder.build()?).await?))\n })\n .await?;\n Ok(graph.clone())\n }\n\n async fn get_graph_for_key(\n &self,\n key: &Neo4jGraphElement,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let spec = auth_registry.get::(&key.connection)?;\n self.get_graph(&spec).await\n }\n}\n\npub struct ExportContext {\n connection_ref: AuthEntryReference,\n graph: Arc,\n\n create_order: u8,\n\n delete_cypher: String,\n insert_cypher: String,\n delete_before_upsert: bool,\n\n analyzed_data_coll: AnalyzedDataCollection,\n\n key_field_params: Vec,\n src_key_field_params: Vec,\n tgt_key_field_params: Vec,\n}\n\nfn json_value_to_bolt_value(value: &serde_json::Value) -> Result {\n let bolt_value = match value {\n serde_json::Value::Null => BoltType::Null(neo4rs::BoltNull),\n serde_json::Value::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n serde_json::Value::Number(v) => {\n if let Some(i) = v.as_i64() {\n BoltType::Integer(neo4rs::BoltInteger::new(i))\n } else if let Some(f) = v.as_f64() {\n BoltType::Float(neo4rs::BoltFloat::new(f))\n } else {\n anyhow::bail!(\"Unsupported JSON number: {}\", v)\n }\n }\n serde_json::Value::String(v) => BoltType::String(neo4rs::BoltString::new(v)),\n serde_json::Value::Array(v) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(json_value_to_bolt_value)\n .collect::>()?,\n }),\n serde_json::Value::Object(v) => BoltType::Map(neo4rs::BoltMap {\n value: v\n .into_iter()\n .map(|(k, v)| Ok((neo4rs::BoltString::new(k), json_value_to_bolt_value(v)?)))\n .collect::>()?,\n }),\n };\n Ok(bolt_value)\n}\n\nfn key_to_bolt(key: &KeyValue, schema: &schema::ValueType) -> Result {\n value_to_bolt(&key.into(), schema)\n}\n\nfn field_values_to_bolt<'a>(\n field_values: impl IntoIterator,\n schema: impl IntoIterator,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(schema, field_values)\n .map(|(schema, value)| {\n Ok((\n 
neo4rs::BoltString::new(&schema.name),\n value_to_bolt(value, &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn mapped_field_values_to_bolt(\n fields_schema: &[schema::FieldSchema],\n fields_input_idx: &[usize],\n field_values: &FieldValues,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(fields_schema.iter(), fields_input_idx.iter())\n .map(|(schema, field_idx)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(&field_values.fields[*field_idx], &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn basic_value_to_bolt(value: &BasicValue, schema: &BasicValueType) -> Result {\n let bolt_value = match value {\n BasicValue::Bytes(v) => {\n BoltType::Bytes(neo4rs::BoltBytes::new(bytes::Bytes::from_owner(v.clone())))\n }\n BasicValue::Str(v) => BoltType::String(neo4rs::BoltString::new(v)),\n BasicValue::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n BasicValue::Int64(v) => BoltType::Integer(neo4rs::BoltInteger::new(*v)),\n BasicValue::Float64(v) => BoltType::Float(neo4rs::BoltFloat::new(*v)),\n BasicValue::Float32(v) => BoltType::Float(neo4rs::BoltFloat::new(*v as f64)),\n BasicValue::Range(v) => BoltType::List(neo4rs::BoltList {\n value: [\n BoltType::Integer(neo4rs::BoltInteger::new(v.start as i64)),\n BoltType::Integer(neo4rs::BoltInteger::new(v.end as i64)),\n ]\n .into(),\n }),\n BasicValue::Uuid(v) => BoltType::String(neo4rs::BoltString::new(&v.to_string())),\n BasicValue::Date(v) => BoltType::Date(neo4rs::BoltDate::from(*v)),\n BasicValue::Time(v) => BoltType::LocalTime(neo4rs::BoltLocalTime::from(*v)),\n BasicValue::LocalDateTime(v) => {\n BoltType::LocalDateTime(neo4rs::BoltLocalDateTime::from(*v))\n }\n BasicValue::OffsetDateTime(v) => BoltType::DateTime(neo4rs::BoltDateTime::from(*v)),\n BasicValue::TimeDelta(v) => BoltType::Duration(neo4rs::BoltDuration::new(\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger { 
value: 0 },\n neo4rs::BoltInteger {\n value: v.num_seconds(),\n },\n v.subsec_nanos().into(),\n )),\n BasicValue::Vector(v) => match schema {\n BasicValueType::Vector(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| basic_value_to_bolt(v, &t.element_type))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-vector type got vector value: {}\", schema),\n },\n BasicValue::Json(v) => json_value_to_bolt_value(v)?,\n BasicValue::UnionVariant { tag_id, value } => match schema {\n BasicValueType::Union(s) => {\n let typ = s\n .types\n .get(*tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"Invalid `tag_id`: {}\", tag_id))?;\n\n basic_value_to_bolt(value, typ)?\n }\n _ => anyhow::bail!(\"Non-union type got union value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nfn value_to_bolt(value: &Value, schema: &schema::ValueType) -> Result {\n let bolt_value = match value {\n Value::Null => BoltType::Null(neo4rs::BoltNull),\n Value::Basic(v) => match schema {\n ValueType::Basic(t) => basic_value_to_bolt(v, t)?,\n _ => anyhow::bail!(\"Non-basic type got basic value: {}\", schema),\n },\n Value::Struct(v) => match schema {\n ValueType::Struct(t) => field_values_to_bolt(v.fields.iter(), t.fields.iter())?,\n _ => anyhow::bail!(\"Non-struct type got struct value: {}\", schema),\n },\n Value::UTable(v) | Value::LTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| field_values_to_bolt(v.0.fields.iter(), t.row.fields.iter()))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n Value::KTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|(k, v)| {\n field_values_to_bolt(\n std::iter::once(&Into::::into(k.clone()))\n .chain(v.0.fields.iter()),\n t.row.fields.iter(),\n )\n })\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n };\n 
Ok(bolt_value)\n}\n\nconst CORE_KEY_PARAM_PREFIX: &str = \"key\";\nconst CORE_PROPS_PARAM: &str = \"props\";\nconst SRC_KEY_PARAM_PREFIX: &str = \"source_key\";\nconst SRC_PROPS_PARAM: &str = \"source_props\";\nconst TGT_KEY_PARAM_PREFIX: &str = \"target_key\";\nconst TGT_PROPS_PARAM: &str = \"target_props\";\nconst CORE_ELEMENT_MATCHER_VAR: &str = \"e\";\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\nimpl ExportContext {\n fn build_key_field_params_n_literal<'a>(\n param_prefix: &str,\n key_fields: impl Iterator,\n ) -> (Vec, String) {\n let (params, items): (Vec, Vec) = key_fields\n .into_iter()\n .enumerate()\n .map(|(i, name)| {\n let param = format!(\"{param_prefix}_{i}\");\n let item = format!(\"{name}: ${param}\");\n (param, item)\n })\n .unzip();\n (params, format!(\"{{{}}}\", items.into_iter().join(\", \")))\n }\n\n fn new(\n graph: Arc,\n spec: Spec,\n analyzed_data_coll: AnalyzedDataCollection,\n ) -> Result {\n let (key_field_params, key_fields_literal) = Self::build_key_field_params_n_literal(\n CORE_KEY_PARAM_PREFIX,\n analyzed_data_coll.schema.key_fields.iter().map(|f| &f.name),\n );\n let result = match spec.mapping {\n GraphElementMapping::Node(node_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_node:{label} {key_fields_literal})\n WITH old_node\n SET old_node.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n WITH old_node\n WHERE NOT (old_node)--()\n DELETE old_node\n FINISH\n \",\n label = node_spec.label,\n };\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_node:{label} {key_fields_literal})\n SET new_node.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE{optional_set_props}\n FINISH\n \",\n label = node_spec.label,\n optional_set_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\", new_node += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 0,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: false,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params: vec![],\n tgt_key_field_params: vec![],\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_src)-[old_rel:{rel_type} {key_fields_literal}]->(old_tgt)\n\n DELETE old_rel\n\n WITH collect(old_src) + collect(old_tgt) AS nodes_to_check\n UNWIND nodes_to_check AS node\n WITH DISTINCT node\n WHERE NOT COALESCE(node.{SELF_CONTAINED_TAG_FIELD_NAME}, FALSE)\n AND COUNT{{ (node)--() }} = 0\n DELETE node\n\n FINISH\n \",\n rel_type = rel_spec.rel_type,\n };\n\n let analyzed_rel = analyzed_data_coll\n .rel\n .as_ref()\n .ok_or_else(invariance_violation)?;\n let analyzed_src = &analyzed_rel.source;\n let analyzed_tgt = &analyzed_rel.target;\n\n let (src_key_field_params, src_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n SRC_KEY_PARAM_PREFIX,\n analyzed_src.schema.key_fields.iter().map(|f| &f.name),\n );\n let (tgt_key_field_params, tgt_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n TGT_KEY_PARAM_PREFIX,\n analyzed_tgt.schema.key_fields.iter().map(|f| &f.name),\n );\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_src:{src_node_label} {src_key_fields_literal})\n {optional_set_src_props}\n\n MERGE (new_tgt:{tgt_node_label} {tgt_key_fields_literal})\n {optional_set_tgt_props}\n\n MERGE (new_src)-[new_rel:{rel_type} {key_fields_literal}]->(new_tgt)\n {optional_set_rel_props}\n\n FINISH\n \",\n src_node_label = rel_spec.source.label,\n optional_set_src_props = if analyzed_src.has_value_fields() {\n format!(\"SET new_src += ${SRC_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n tgt_node_label = rel_spec.target.label,\n optional_set_tgt_props = if analyzed_tgt.has_value_fields() {\n format!(\"SET new_tgt += ${TGT_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n rel_type = rel_spec.rel_type,\n optional_set_rel_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\"SET new_rel += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 1,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: true,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params,\n tgt_key_field_params,\n }\n }\n };\n Ok(result)\n }\n\n fn bind_key_field_params<'a>(\n query: neo4rs::Query,\n params: &[String],\n type_val: impl Iterator,\n ) -> Result {\n let mut query = query;\n for (i, (typ, val)) in type_val.enumerate() {\n query = query.param(¶ms[i], value_to_bolt(val, typ)?);\n }\n Ok(query)\n }\n\n fn bind_rel_key_field_params(\n &self,\n query: neo4rs::Query,\n val: &KeyValue,\n ) -> Result {\n let mut query = query;\n for (i, val) in val\n .fields_iter(self.analyzed_data_coll.schema.key_fields.len())?\n .enumerate()\n {\n query = query.param(\n &self.key_field_params[i],\n key_to_bolt(\n val,\n &self.analyzed_data_coll.schema.key_fields[i].value_type.typ,\n )?,\n );\n }\n Ok(query)\n }\n\n fn add_upsert_queries(\n &self,\n upsert: &ExportTargetUpsertEntry,\n queries: &mut Vec,\n ) -> Result<()> {\n if self.delete_before_upsert {\n queries.push(\n 
self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), &upsert.key)?,\n );\n }\n\n let value = &upsert.value;\n let mut query =\n self.bind_rel_key_field_params(neo4rs::query(&self.insert_cypher), &upsert.key)?;\n\n if let Some(analyzed_rel) = &self.analyzed_data_coll.rel {\n let bind_params = |query: neo4rs::Query,\n analyzed: &AnalyzedGraphElementFieldMapping,\n key_field_params: &[String]|\n -> Result {\n let mut query = Self::bind_key_field_params(\n query,\n key_field_params,\n std::iter::zip(\n analyzed.schema.key_fields.iter(),\n analyzed.fields_input_idx.key.iter(),\n )\n .map(|(f, field_idx)| (&f.value_type.typ, &value.fields[*field_idx])),\n )?;\n if analyzed.has_value_fields() {\n query = query.param(\n SRC_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &analyzed.schema.value_fields,\n &analyzed.fields_input_idx.value,\n value,\n )?,\n );\n }\n Ok(query)\n };\n query = bind_params(query, &analyzed_rel.source, &self.src_key_field_params)?;\n query = bind_params(query, &analyzed_rel.target, &self.tgt_key_field_params)?;\n }\n\n if !self.analyzed_data_coll.value_fields_input_idx.is_empty() {\n query = query.param(\n CORE_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &self.analyzed_data_coll.schema.value_fields,\n &self.analyzed_data_coll.value_fields_input_idx,\n value,\n )?,\n );\n }\n queries.push(query);\n Ok(())\n }\n\n fn add_delete_queries(\n &self,\n delete_key: &value::KeyValue,\n queries: &mut Vec,\n ) -> Result<()> {\n queries\n .push(self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), delete_key)?);\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SetupState {\n key_field_names: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n dependent_node_labels: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n sub_components: Vec,\n}\n\nimpl SetupState {\n fn new(\n schema: &GraphElementSchema,\n index_options: &IndexOptions,\n dependent_node_labels: Vec,\n ) -> 
Result {\n let key_field_names: Vec =\n schema.key_fields.iter().map(|f| f.name.clone()).collect();\n let mut sub_components = vec![];\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::KeyConstraint {\n field_names: key_field_names.clone(),\n },\n });\n let value_field_types = schema\n .value_fields\n .iter()\n .map(|f| (f.name.as_str(), &f.value_type.typ))\n .collect::>();\n for index_def in index_options.vector_indexes.iter() {\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::from_vector_index_def(\n index_def,\n value_field_types\n .get(index_def.field_name.as_str())\n .ok_or_else(|| {\n api_error!(\n \"Unknown field name for vector index: {}\",\n index_def.field_name\n )\n })?,\n )?,\n });\n }\n Ok(Self {\n key_field_names,\n dependent_node_labels,\n sub_components,\n })\n }\n\n fn check_compatible(&self, existing: &Self) -> SetupStateCompatibility {\n if self.key_field_names == existing.key_field_names {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n }\n }\n}\n\nimpl IntoIterator for SetupState {\n type Item = ComponentState;\n type IntoIter = std::vec::IntoIter;\n\n fn into_iter(self) -> Self::IntoIter {\n self.sub_components.into_iter()\n }\n}\n#[derive(Debug, Default)]\nstruct DataClearAction {\n dependent_node_labels: Vec,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\nenum ComponentKind {\n KeyConstraint,\n VectorIndex,\n}\n\nimpl ComponentKind {\n fn describe(&self) -> &str {\n match self {\n ComponentKind::KeyConstraint => \"KEY CONSTRAINT\",\n ComponentKind::VectorIndex => \"VECTOR INDEX\",\n }\n }\n}\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ComponentKey {\n kind: ComponentKind,\n name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nenum IndexDef {\n KeyConstraint {\n field_names: Vec,\n },\n VectorIndex {\n field_name: String,\n metric: 
spec::VectorSimilarityMetric,\n vector_size: usize,\n },\n}\n\nimpl IndexDef {\n fn from_vector_index_def(\n index_def: &spec::VectorIndexDef,\n field_typ: &schema::ValueType,\n ) -> Result {\n Ok(Self::VectorIndex {\n field_name: index_def.field_name.clone(),\n vector_size: (match field_typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(schema)) => {\n schema.dimension\n }\n _ => None,\n })\n .ok_or_else(|| {\n api_error!(\"Vector index field must be a vector with fixed dimension\")\n })?,\n metric: index_def.metric,\n })\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\npub struct ComponentState {\n object_label: ElementType,\n index_def: IndexDef,\n}\n\nimpl components::State for ComponentState {\n fn key(&self) -> ComponentKey {\n let prefix = match &self.object_label {\n ElementType::Relationship(_) => \"r\",\n ElementType::Node(_) => \"n\",\n };\n let label = self.object_label.label();\n match &self.index_def {\n IndexDef::KeyConstraint { .. } => ComponentKey {\n kind: ComponentKind::KeyConstraint,\n name: format!(\"{prefix}__{label}__key\"),\n },\n IndexDef::VectorIndex {\n field_name, metric, ..\n } => ComponentKey {\n kind: ComponentKind::VectorIndex,\n name: format!(\"{prefix}__{label}__{field_name}__{metric}__vidx\"),\n },\n }\n }\n}\n\npub struct SetupComponentOperator {\n graph_pool: Arc,\n conn_spec: ConnectionSpec,\n}\n\n#[async_trait]\nimpl components::SetupOperator for SetupComponentOperator {\n type Key = ComponentKey;\n type State = ComponentState;\n type SetupState = SetupState;\n type Context = ();\n\n fn describe_key(&self, key: &Self::Key) -> String {\n format!(\"{} {}\", key.kind.describe(), key.name)\n }\n\n fn describe_state(&self, state: &Self::State) -> String {\n let key_desc = self.describe_key(&state.key());\n let label = state.object_label.label();\n match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n format!(\"{key_desc} ON {label} (key: {})\", field_names.join(\", \"))\n 
}\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n format!(\n \"{key_desc} ON {label} (field_name: {field_name}, vector_size: {vector_size}, metric: {metric})\",\n )\n }\n }\n }\n\n fn is_up_to_date(&self, current: &ComponentState, desired: &ComponentState) -> bool {\n current == desired\n }\n\n async fn create(&self, state: &ComponentState, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let key = state.key();\n let qualifier = CORE_ELEMENT_MATCHER_VAR;\n let matcher = state.object_label.matcher(qualifier);\n let query = neo4rs::query(&match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n let key_type = match &state.object_label {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"RELATIONSHIP\",\n };\n format!(\n \"CREATE CONSTRAINT {name} IF NOT EXISTS FOR {matcher} REQUIRE {field_names} IS {key_type} KEY\",\n name = key.name,\n field_names = build_composite_field_names(qualifier, field_names),\n )\n }\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n formatdoc! 
{\"\n CREATE VECTOR INDEX {name} IF NOT EXISTS\n FOR {matcher} ON {qualifier}.{field_name}\n OPTIONS {{\n indexConfig: {{\n `vector.dimensions`: {vector_size},\n `vector.similarity_function`: '{metric}'\n }}\n }}\",\n name = key.name,\n }\n }\n });\n Ok(graph.run(query).await?)\n }\n\n async fn delete(&self, key: &ComponentKey, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let query = neo4rs::query(&format!(\n \"DROP {kind} {name} IF EXISTS\",\n kind = match key.kind {\n ComponentKind::KeyConstraint => \"CONSTRAINT\",\n ComponentKind::VectorIndex => \"INDEX\",\n },\n name = key.name,\n ));\n Ok(graph.run(query).await?)\n }\n}\n\nfn build_composite_field_names(qualifier: &str, field_names: &[String]) -> String {\n let strs = field_names\n .iter()\n .map(|name| format!(\"{qualifier}.{name}\"))\n .join(\", \");\n if field_names.len() == 1 {\n strs\n } else {\n format!(\"({strs})\")\n }\n}\n#[derive(Debug)]\npub struct GraphElementDataSetupStatus {\n data_clear: Option,\n change_type: SetupChangeType,\n}\n\nimpl GraphElementDataSetupStatus {\n fn new(desired_state: Option<&SetupState>, existing: &CombinedState) -> Self {\n let mut data_clear: Option = None;\n for v in existing.possible_versions() {\n if desired_state.as_ref().is_none_or(|desired| {\n desired.check_compatible(v) == SetupStateCompatibility::NotCompatible\n }) {\n data_clear\n .get_or_insert_default()\n .dependent_node_labels\n .extend(v.dependent_node_labels.iter().cloned());\n }\n }\n\n let change_type = match (desired_state, existing.possible_versions().next()) {\n (Some(_), Some(_)) => {\n if data_clear.is_none() {\n SetupChangeType::NoChange\n } else {\n SetupChangeType::Update\n }\n }\n (Some(_), None) => SetupChangeType::Create,\n (None, Some(_)) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n };\n\n Self {\n data_clear,\n change_type,\n }\n }\n}\n\nimpl ResourceSetupStatus for GraphElementDataSetupStatus {\n 
fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if let Some(data_clear) = &self.data_clear {\n let mut desc = \"Clear data\".to_string();\n if !data_clear.dependent_node_labels.is_empty() {\n write!(\n &mut desc,\n \"; dependents {}\",\n data_clear\n .dependent_node_labels\n .iter()\n .map(|l| format!(\"{}\", ElementType::Node(l.clone())))\n .join(\", \")\n )\n .unwrap();\n }\n result.push(setup::ChangeDescription::Action(desc));\n }\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.change_type\n }\n}\n\nasync fn clear_graph_element_data(\n graph: &Graph,\n key: &Neo4jGraphElement,\n is_self_contained: bool,\n) -> Result<()> {\n let var_name = CORE_ELEMENT_MATCHER_VAR;\n let matcher = key.typ.matcher(var_name);\n let query_string = match key.typ {\n ElementType::Node(_) => {\n let optional_reset_self_contained = if is_self_contained {\n formatdoc! {\"\n WITH {var_name}\n SET {var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n \"}\n } else {\n \"\".to_string()\n };\n formatdoc! {\"\n CALL {{\n MATCH {matcher}\n {optional_reset_self_contained}\n WITH {var_name} WHERE NOT ({var_name})--() DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n ElementType::Relationship(_) => {\n formatdoc! 
{\"\n CALL {{\n MATCH {matcher} WITH {var_name} DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n };\n let delete_query = neo4rs::query(&query_string);\n graph.run(delete_query).await?;\n Ok(())\n}\n\n/// Factory for Neo4j relationships\npub struct Factory {\n graph_pool: Arc,\n}\n\nimpl Factory {\n pub fn new() -> Self {\n Self {\n graph_pool: Arc::default(),\n }\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = (\n GraphElementDataSetupStatus,\n components::SetupStatus,\n );\n type Key = Neo4jGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Neo4j\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Neo4jGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n let data_coll_output = std::iter::zip(data_collections, analyzed_data_colls)\n .map(|(data_coll, analyzed)| {\n let setup_key = Neo4jGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState::new(\n &analyzed.schema,\n &data_coll.index_options,\n analyzed\n .dependent_node_labels()\n .into_iter()\n .map(|s| s.to_string())\n .collect(),\n )?;\n\n let conn_spec = context\n .auth_registry\n .get::(&data_coll.spec.connection)?;\n let factory = self.clone();\n let export_context = async move {\n Ok(Arc::new(ExportContext::new(\n factory.graph_pool.get_graph(&conn_spec).await?,\n 
data_coll.spec,\n analyzed,\n )?))\n }\n .boxed();\n\n Ok(TypedExportDataCollectionBuildOutput {\n export_context,\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>>()?;\n let decl_output = std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state =\n SetupState::new(&graph_elem_schema, &decl.decl.index_options, vec![])?;\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: Neo4jGraphElement,\n desired: Option,\n existing: CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result {\n let conn_spec = flow_instance_ctx\n .auth_registry\n .get::(&key.connection)?;\n let data_status = GraphElementDataSetupStatus::new(desired.as_ref(), &existing);\n let components = components::SetupStatus::create(\n SetupComponentOperator {\n graph_pool: self.graph_pool.clone(),\n conn_spec,\n },\n desired,\n existing,\n )?;\n Ok((data_status, components))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(desired.check_compatible(existing))\n }\n\n fn describe_resource(&self, key: &Neo4jGraphElement) -> Result {\n Ok(format!(\"Neo4j {}\", key.typ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut muts_by_graph = HashMap::new();\n for mut_with_ctx in mutations.iter() {\n muts_by_graph\n .entry(&mut_with_ctx.export_context.connection_ref)\n .or_insert_with(Vec::new)\n .push(mut_with_ctx);\n }\n let retry_options = retryable::RetryOptions::default();\n for muts in muts_by_graph.values_mut() {\n muts.sort_by_key(|m| m.export_context.create_order);\n let graph = &muts[0].export_context.graph;\n retryable::run(\n async || {\n let mut queries = vec![];\n for mut_with_ctx in muts.iter() {\n let export_ctx = 
&mut_with_ctx.export_context;\n for upsert in mut_with_ctx.mutation.upserts.iter() {\n export_ctx.add_upsert_queries(upsert, &mut queries)?;\n }\n }\n for mut_with_ctx in muts.iter().rev() {\n let export_ctx = &mut_with_ctx.export_context;\n for deletion in mut_with_ctx.mutation.deletes.iter() {\n export_ctx.add_delete_queries(&deletion.key, &mut queries)?;\n }\n }\n let mut txn = graph.start_txn().await?;\n txn.run_queries(queries).await?;\n txn.commit().await?;\n retryable::Ok(())\n },\n &retry_options,\n )\n .await\n .map_err(Into::::into)?\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Relationships first, then nodes, as relationships need to be deleted before nodes they referenced.\n let mut relationship_types = IndexSet::<&Neo4jGraphElement>::new();\n let mut node_labels = IndexSet::<&Neo4jGraphElement>::new();\n let mut dependent_node_labels = IndexSet::::new();\n\n let mut components = vec![];\n for change in changes.iter() {\n if let Some(data_clear) = &change.setup_status.0.data_clear {\n match &change.key.typ {\n ElementType::Relationship(_) => {\n relationship_types.insert(&change.key);\n for label in &data_clear.dependent_node_labels {\n dependent_node_labels.insert(Neo4jGraphElement {\n connection: change.key.connection.clone(),\n typ: ElementType::Node(label.clone()),\n });\n }\n }\n ElementType::Node(_) => {\n node_labels.insert(&change.key);\n }\n }\n }\n components.push(&change.setup_status.1);\n }\n\n // Relationships have no dependency, so can be cleared first.\n for rel_type in relationship_types.into_iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(rel_type, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, rel_type, true).await?;\n }\n // Clear standalone nodes, which is simpler than dependent nodes.\n for node_label in node_labels.iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n 
.await?;\n clear_graph_element_data(&graph, node_label, true).await?;\n }\n // Clear dependent nodes if they're not covered by standalone nodes.\n for node_label in dependent_node_labels.iter() {\n if !node_labels.contains(node_label) {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, node_label, false).await?;\n }\n }\n\n apply_component_changes(components, &()).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/execution/row_indexer.rs", "use crate::prelude::*;\n\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, HashSet};\n\nuse super::db_tracking::{self, TrackedTargetKeyInfo, read_source_tracking_info_for_processing};\nuse super::db_tracking_setup;\nuse super::evaluator::{\n EvaluateSourceEntryOutput, SourceRowEvaluationContext, evaluate_source_entry,\n};\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, StoredMemoizationInfo};\nuse super::stats;\n\nuse crate::base::value::{self, FieldValues, KeyValue};\nuse crate::builder::plan::*;\nuse crate::ops::interface::{\n ExportTargetMutation, ExportTargetUpsertEntry, Ordinal, SourceExecutorGetOptions,\n};\nuse crate::utils::db::WriteAction;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\npub fn extract_primary_key(\n primary_key_def: &AnalyzedPrimaryKeyDef,\n record: &FieldValues,\n) -> Result {\n match primary_key_def {\n AnalyzedPrimaryKeyDef::Fields(fields) => {\n KeyValue::from_values(fields.iter().map(|field| &record.fields[*field]))\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]\npub enum SourceVersionKind {\n #[default]\n UnknownLogic,\n DifferentLogic,\n CurrentLogic,\n NonExistence,\n}\n\n#[derive(Debug, Clone, Default)]\npub struct SourceVersion {\n pub ordinal: Ordinal,\n pub kind: SourceVersionKind,\n}\n\nimpl SourceVersion {\n pub fn from_stored(\n stored_ordinal: Option,\n stored_fp: &Option>,\n curr_fp: 
Fingerprint,\n ) -> Self {\n Self {\n ordinal: Ordinal(stored_ordinal),\n kind: match &stored_fp {\n Some(stored_fp) => {\n if stored_fp.as_slice() == curr_fp.0.as_slice() {\n SourceVersionKind::CurrentLogic\n } else {\n SourceVersionKind::DifferentLogic\n }\n }\n None => SourceVersionKind::UnknownLogic,\n },\n }\n }\n\n pub fn from_stored_processing_info(\n info: &db_tracking::SourceTrackingInfoForProcessing,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_stored_precommit_info(\n info: &db_tracking::SourceTrackingInfoForPrecommit,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_current_with_ordinal(ordinal: Ordinal) -> Self {\n Self {\n ordinal,\n kind: SourceVersionKind::CurrentLogic,\n }\n }\n\n pub fn from_current_data(data: &interface::SourceData) -> Self {\n let kind = match &data.value {\n interface::SourceValue::Existence(_) => SourceVersionKind::CurrentLogic,\n interface::SourceValue::NonExistence => SourceVersionKind::NonExistence,\n };\n Self {\n ordinal: data.ordinal,\n kind,\n }\n }\n\n pub fn should_skip(\n &self,\n target: &SourceVersion,\n update_stats: Option<&stats::UpdateStats>,\n ) -> bool {\n // Ordinal indicates monotonic invariance - always respect ordinal order\n // Never process older ordinals to maintain consistency\n let should_skip = match (self.ordinal.0, target.ordinal.0) {\n (Some(existing_ordinal), Some(target_ordinal)) => {\n // Skip if target ordinal is older, or same ordinal with same/older logic version\n existing_ordinal > target_ordinal\n || (existing_ordinal == target_ordinal && self.kind >= target.kind)\n }\n _ => false,\n };\n if should_skip {\n if let Some(update_stats) = update_stats {\n update_stats.num_no_change.inc(1);\n }\n }\n should_skip\n }\n}\n\npub enum SkippedOr {\n Normal(T),\n 
Skipped(SourceVersion),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\nstruct TargetKeyPair {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Default)]\nstruct TrackingInfoForTarget<'a> {\n export_op: Option<&'a AnalyzedExportOp>,\n\n // Existing keys info. Keyed by target key.\n // Will be removed after new rows for the same key are added into `new_staging_keys_info` and `mutation.upserts`,\n // hence all remaining ones are to be deleted.\n existing_staging_keys_info: HashMap)>>,\n existing_keys_info: HashMap)>>,\n\n // New keys info for staging.\n new_staging_keys_info: Vec,\n\n // Mutation to apply to the target storage.\n mutation: ExportTargetMutation,\n}\n\n#[derive(Debug)]\nstruct PrecommitData<'a> {\n evaluate_output: &'a EvaluateSourceEntryOutput,\n memoization_info: &'a StoredMemoizationInfo,\n}\nstruct PrecommitMetadata {\n source_entry_exists: bool,\n process_ordinal: i64,\n existing_process_ordinal: Option,\n new_target_keys: db_tracking::TrackedTargetKeyForSource,\n}\nstruct PrecommitOutput {\n metadata: PrecommitMetadata,\n target_mutations: HashMap,\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fp: Fingerprint,\n data: Option>,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n export_ops: &[AnalyzedExportOp],\n export_ops_exec_ctx: &[exec_ctx::ExportOpExecutionContext],\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n if let Some(tracking_info) = &tracking_info {\n let existing_source_version =\n SourceVersion::from_stored_precommit_info(tracking_info, logic_fp);\n if 
existing_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_source_version));\n }\n }\n let tracking_info_exists = tracking_info.is_some();\n let process_ordinal = (tracking_info\n .as_ref()\n .map(|info| info.max_process_ordinal)\n .unwrap_or(0)\n + 1)\n .max(process_timestamp.timestamp_millis());\n let existing_process_ordinal = tracking_info.as_ref().and_then(|info| info.process_ordinal);\n\n let mut tracking_info_for_targets = HashMap::::new();\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default()\n .export_op = Some(export_op);\n }\n\n // Collect `tracking_info_for_targets` from existing tracking info.\n if let Some(info) = tracking_info {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n for (target_id, keys_info) in staging_target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_staging_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n\n if let Some(sqlx::types::Json(target_keys)) = info.target_keys {\n for (target_id, keys_info) in target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n }\n }\n\n let mut new_target_keys_info = db_tracking::TrackedTargetKeyForSource::default();\n if let Some(data) = &data {\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), 
export_ops_exec_ctx.iter())\n {\n let target_info = tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default();\n let mut keys_info = Vec::new();\n let collected_values =\n &data.evaluate_output.collected_values[export_op.input.collector_idx as usize];\n for value in collected_values.iter() {\n let primary_key = extract_primary_key(&export_op.primary_key_def, value)?;\n let primary_key_json = serde_json::to_value(&primary_key)?;\n\n let mut field_values = FieldValues {\n fields: Vec::with_capacity(export_op.value_fields.len()),\n };\n for field in export_op.value_fields.iter() {\n field_values\n .fields\n .push(value.fields[*field as usize].clone());\n }\n let additional_key = export_op.export_target_factory.extract_additional_key(\n &primary_key,\n &field_values,\n export_op.export_context.as_ref(),\n )?;\n let target_key_pair = TargetKeyPair {\n key: primary_key_json,\n additional_key,\n };\n let existing_target_keys = target_info.existing_keys_info.remove(&target_key_pair);\n let existing_staging_target_keys = target_info\n .existing_staging_keys_info\n .remove(&target_key_pair);\n\n let curr_fp = if !export_op.value_stable {\n Some(\n Fingerprinter::default()\n .with(&field_values)?\n .into_fingerprint(),\n )\n } else {\n None\n };\n if existing_target_keys\n .as_ref()\n .map(|keys| !keys.is_empty() && keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(false)\n && existing_staging_target_keys\n .map(|keys| keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(true)\n {\n // Already exists, with exactly the same value fingerprint.\n // Nothing need to be changed, except carrying over the existing target keys info.\n let (existing_ordinal, existing_fp) = existing_target_keys\n .ok_or_else(invariance_violation)?\n .into_iter()\n .next()\n .ok_or_else(invariance_violation)?;\n keys_info.push(TrackedTargetKeyInfo {\n key: target_key_pair.key,\n additional_key: target_key_pair.additional_key,\n process_ordinal: existing_ordinal,\n 
fingerprint: existing_fp,\n });\n } else {\n // Entry with new value. Needs to be upserted.\n let tracked_target_key = TrackedTargetKeyInfo {\n key: target_key_pair.key.clone(),\n additional_key: target_key_pair.additional_key.clone(),\n process_ordinal,\n fingerprint: curr_fp,\n };\n target_info.mutation.upserts.push(ExportTargetUpsertEntry {\n key: primary_key,\n additional_key: target_key_pair.additional_key,\n value: field_values,\n });\n target_info\n .new_staging_keys_info\n .push(tracked_target_key.clone());\n keys_info.push(tracked_target_key);\n }\n }\n new_target_keys_info.push((export_op_exec_ctx.target_id, keys_info));\n }\n }\n\n let mut new_staging_target_keys = db_tracking::TrackedTargetKeyForSource::default();\n let mut target_mutations = HashMap::with_capacity(export_ops.len());\n for (target_id, target_tracking_info) in tracking_info_for_targets.into_iter() {\n let legacy_keys: HashSet = target_tracking_info\n .existing_keys_info\n .into_keys()\n .chain(target_tracking_info.existing_staging_keys_info.into_keys())\n .collect();\n\n let mut new_staging_keys_info = target_tracking_info.new_staging_keys_info;\n // Add tracking info for deletions.\n new_staging_keys_info.extend(legacy_keys.iter().map(|key| TrackedTargetKeyInfo {\n key: key.key.clone(),\n additional_key: key.additional_key.clone(),\n process_ordinal,\n fingerprint: None,\n }));\n new_staging_target_keys.push((target_id, new_staging_keys_info));\n\n if let Some(export_op) = target_tracking_info.export_op {\n let mut mutation = target_tracking_info.mutation;\n mutation.deletes.reserve(legacy_keys.len());\n for legacy_key in legacy_keys.into_iter() {\n let key = value::Value::::from_json(\n legacy_key.key,\n &export_op.primary_key_type,\n )?\n .as_key()?;\n mutation.deletes.push(interface::ExportTargetDeleteEntry {\n key,\n additional_key: legacy_key.additional_key,\n });\n }\n target_mutations.insert(target_id, mutation);\n }\n }\n\n db_tracking::precommit_source_tracking_info(\n 
source_id,\n source_key_json,\n process_ordinal,\n new_staging_target_keys,\n data.as_ref().map(|data| data.memoization_info),\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n\n txn.commit().await?;\n\n Ok(SkippedOr::Normal(PrecommitOutput {\n metadata: PrecommitMetadata {\n source_entry_exists: data.is_some(),\n process_ordinal,\n existing_process_ordinal,\n new_target_keys: new_target_keys_info,\n },\n target_mutations,\n }))\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fingerprint: &[u8],\n precommit_metadata: PrecommitMetadata,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_commit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n let tracking_info_exists = tracking_info.is_some();\n if tracking_info.as_ref().and_then(|info| info.process_ordinal)\n >= Some(precommit_metadata.process_ordinal)\n {\n return Ok(());\n }\n\n let cleaned_staging_target_keys = tracking_info\n .map(|info| {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n staging_target_keys\n .into_iter()\n .filter_map(|(target_id, target_keys)| {\n let cleaned_target_keys: Vec<_> = target_keys\n .into_iter()\n .filter(|key_info| {\n Some(key_info.process_ordinal)\n > precommit_metadata.existing_process_ordinal\n && key_info.process_ordinal != precommit_metadata.process_ordinal\n })\n .collect();\n if !cleaned_target_keys.is_empty() {\n Some((target_id, cleaned_target_keys))\n } else {\n None\n }\n })\n .collect::>()\n })\n .unwrap_or_default();\n if !precommit_metadata.source_entry_exists && cleaned_staging_target_keys.is_empty() {\n // TODO: When we 
support distributed execution in the future, we'll need to leave a tombstone for a while\n // to prevent an earlier update causing the record reappear because of out-of-order processing.\n if tracking_info_exists {\n db_tracking::delete_source_tracking_info(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n }\n } else {\n db_tracking::commit_source_tracking_info(\n source_id,\n source_key_json,\n cleaned_staging_target_keys,\n source_version.ordinal.into(),\n logic_fingerprint,\n precommit_metadata.process_ordinal,\n process_timestamp.timestamp_micros(),\n precommit_metadata.new_target_keys,\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n }\n\n txn.commit().await?;\n\n Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn try_content_hash_optimization(\n source_id: i32,\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n current_hash: &crate::utils::fingerprint::Fingerprint,\n tracking_info: &db_tracking::SourceTrackingInfoForProcessing,\n existing_version: &Option,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result>> {\n // Check if we can use content hash optimization\n if existing_version\n .as_ref()\n .is_none_or(|v| v.kind != SourceVersionKind::CurrentLogic)\n {\n return Ok(None);\n }\n\n if tracking_info\n .max_process_ordinal\n .zip(tracking_info.process_ordinal)\n .is_none_or(|(max_ord, proc_ord)| max_ord != proc_ord)\n {\n return Ok(None);\n }\n\n let existing_hash = tracking_info\n .memoization_info\n .as_ref()\n .and_then(|info| info.0.as_ref())\n .and_then(|stored_info| stored_info.content_hash.as_ref());\n\n if existing_hash != Some(current_hash) {\n return Ok(None);\n }\n\n // Content hash matches - try optimization\n let mut txn = pool.begin().await?;\n\n let current_tracking_info = 
db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n let Some(current_tracking_info) = current_tracking_info else {\n return Ok(None);\n };\n\n // Check 1: Same check as precommit - verify no newer version exists\n let current_source_version = SourceVersion::from_stored_precommit_info(\n ¤t_tracking_info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n if current_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(Some(SkippedOr::Skipped(current_source_version)));\n }\n\n // Check 2: Verify process_ordinal hasn't changed (no concurrent processing)\n let original_process_ordinal = tracking_info.process_ordinal;\n if current_tracking_info.process_ordinal != original_process_ordinal {\n return Ok(None);\n }\n\n // Safe to apply optimization - just update tracking table\n db_tracking::update_source_tracking_ordinal(\n source_id,\n source_key_json,\n source_version.ordinal.0,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n txn.commit().await?;\n update_stats.num_no_change.inc(1);\n Ok(Some(SkippedOr::Normal(())))\n}\n\npub async fn evaluate_source_entry_with_memory(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n options: EvaluationMemoryOptions,\n pool: &PgPool,\n) -> Result> {\n let stored_info = if options.enable_cache || !options.evaluation_only {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n existing_tracking_info\n .and_then(|info| info.memoization_info.map(|info| info.0))\n .flatten()\n } else {\n None\n };\n let memory = EvaluationMemory::new(chrono::Utc::now(), stored_info, options);\n let source_value = src_eval_ctx\n 
.import_op\n .executor\n .get_value(\n src_eval_ctx.key,\n &SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: false,\n },\n )\n .await?\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value not returned\"))?;\n let output = match source_value {\n interface::SourceValue::Existence(source_value) => {\n Some(evaluate_source_entry(src_eval_ctx, source_value, &memory).await?)\n }\n interface::SourceValue::NonExistence => None,\n };\n Ok(output)\n}\n\npub async fn update_source_row(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n source_value: interface::SourceValue,\n source_version: &SourceVersion,\n pool: &PgPool,\n update_stats: &stats::UpdateStats,\n) -> Result> {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let process_time = chrono::Utc::now();\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n\n // Phase 1: Check existing tracking info and apply optimizations\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n let existing_version = match &existing_tracking_info {\n Some(info) => {\n let existing_version = SourceVersion::from_stored_processing_info(\n info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n\n // First check ordinal-based skipping\n if existing_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_version));\n }\n\n Some(existing_version)\n }\n None => None,\n };\n\n // Compute content hash once if needed for both optimization and evaluation\n let current_content_hash = match &source_value {\n interface::SourceValue::Existence(source_value) => Some(\n Fingerprinter::default()\n .with(source_value)?\n .into_fingerprint(),\n ),\n interface::SourceValue::NonExistence => None,\n };\n\n if let (Some(current_hash), Some(existing_tracking_info)) =\n 
(¤t_content_hash, &existing_tracking_info)\n {\n if let Some(optimization_result) = try_content_hash_optimization(\n source_id,\n src_eval_ctx,\n &source_key_json,\n source_version,\n current_hash,\n existing_tracking_info,\n &existing_version,\n &setup_execution_ctx.setup_state.tracking_table,\n update_stats,\n pool,\n )\n .await?\n {\n return Ok(optimization_result);\n }\n }\n\n let (output, stored_mem_info) = {\n let extracted_memoization_info = existing_tracking_info\n .and_then(|info| info.memoization_info)\n .and_then(|info| info.0);\n\n match source_value {\n interface::SourceValue::Existence(source_value) => {\n let evaluation_memory = EvaluationMemory::new(\n process_time,\n extracted_memoization_info,\n EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: false,\n },\n );\n\n let output =\n evaluate_source_entry(src_eval_ctx, source_value, &evaluation_memory).await?;\n let mut stored_info = evaluation_memory.into_stored()?;\n stored_info.content_hash = current_content_hash;\n\n (Some(output), stored_info)\n }\n interface::SourceValue::NonExistence => (None, Default::default()),\n }\n };\n\n // Phase 2 (precommit): Update with the memoization info and stage target keys.\n let precommit_output = precommit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n src_eval_ctx.plan.logic_fingerprint,\n output.as_ref().map(|scope_value| PrecommitData {\n evaluate_output: scope_value,\n memoization_info: &stored_mem_info,\n }),\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n &src_eval_ctx.plan.export_ops,\n &setup_execution_ctx.export_ops,\n update_stats,\n pool,\n )\n .await?;\n let precommit_output = match precommit_output {\n SkippedOr::Normal(output) => output,\n SkippedOr::Skipped(source_version) => return Ok(SkippedOr::Skipped(source_version)),\n };\n\n // Phase 3: Apply changes to the target storage, including upserting new target records and removing existing ones.\n let mut target_mutations = 
precommit_output.target_mutations;\n let apply_futs = src_eval_ctx\n .plan\n .export_op_groups\n .iter()\n .filter_map(|export_op_group| {\n let mutations_w_ctx: Vec<_> = export_op_group\n .op_idx\n .iter()\n .filter_map(|export_op_idx| {\n let export_op = &src_eval_ctx.plan.export_ops[*export_op_idx];\n target_mutations\n .remove(&setup_execution_ctx.export_ops[*export_op_idx].target_id)\n .filter(|m| !m.is_empty())\n .map(|mutation| interface::ExportTargetMutationWithContext {\n mutation,\n export_context: export_op.export_context.as_ref(),\n })\n })\n .collect();\n (!mutations_w_ctx.is_empty()).then(|| {\n export_op_group\n .target_factory\n .apply_mutation(mutations_w_ctx)\n })\n });\n\n // TODO: Handle errors.\n try_join_all(apply_futs).await?;\n\n // Phase 4: Update the tracking record.\n commit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n &src_eval_ctx.plan.logic_fingerprint.0,\n precommit_output.metadata,\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n if let Some(existing_version) = existing_version {\n if output.is_some() {\n if !source_version.ordinal.is_available()\n || source_version.ordinal != existing_version.ordinal\n {\n update_stats.num_updates.inc(1);\n } else {\n update_stats.num_reprocesses.inc(1);\n }\n } else {\n update_stats.num_deletions.inc(1);\n }\n } else if output.is_some() {\n update_stats.num_insertions.inc(1);\n }\n\n Ok(SkippedOr::Normal(()))\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_github_actions_scenario_ordinal_behavior() {\n // Test ordinal-based behavior - should_skip only cares about ordinal monotonic invariance\n // Content hash optimization is handled at update_source_row level\n\n let processed_version = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // GitHub Actions checkout: timestamp changes but content same\n let after_checkout_version = 
SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip at should_skip level (ordinal is newer - monotonic invariance)\n // Content hash optimization happens at update_source_row level to update only tracking\n assert!(!processed_version.should_skip(&after_checkout_version, None));\n\n // Reverse case: if we somehow get an older ordinal, always skip\n assert!(after_checkout_version.should_skip(&processed_version, None));\n\n // Now simulate actual content change\n let content_changed_version = SourceVersion {\n ordinal: Ordinal(Some(3000)), // Even newer timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip processing (ordinal is newer)\n assert!(!processed_version.should_skip(&content_changed_version, None));\n }\n\n #[test]\n fn test_content_hash_computation() {\n use crate::base::value::{BasicValue, FieldValues, Value};\n use crate::utils::fingerprint::Fingerprinter;\n\n // Test that content hash is computed correctly from source data\n let source_data1 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data2 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data3 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"World\".into())), // Different content\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let hash1 = Fingerprinter::default()\n .with(&source_data1)\n .unwrap()\n .into_fingerprint();\n\n let hash2 = Fingerprinter::default()\n .with(&source_data2)\n .unwrap()\n .into_fingerprint();\n\n let hash3 = Fingerprinter::default()\n .with(&source_data3)\n .unwrap()\n .into_fingerprint();\n\n // Same content should produce same hash\n assert_eq!(hash1, hash2);\n\n // Different content should produce different hash\n assert_ne!(hash1, 
hash3);\n assert_ne!(hash2, hash3);\n }\n\n #[test]\n fn test_github_actions_content_hash_optimization_requirements() {\n // This test documents the exact requirements for GitHub Actions scenario\n // where file modification times change but content remains the same\n\n use crate::utils::fingerprint::Fingerprinter;\n\n // Simulate file content that remains the same across GitHub Actions checkout\n let file_content = \"const hello = 'world';\\nexport default hello;\";\n\n // Hash before checkout (original file)\n let hash_before_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Hash after checkout (same content, different timestamp)\n let hash_after_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Content hashes must be identical for optimization to work\n assert_eq!(\n hash_before_checkout, hash_after_checkout,\n \"Content hash optimization requires identical hashes for same content\"\n );\n\n // Test with slightly different content (should produce different hashes)\n let modified_content = \"const hello = 'world!';\\nexport default hello;\"; // Added !\n let hash_modified = Fingerprinter::default()\n .with(&modified_content)\n .unwrap()\n .into_fingerprint();\n\n assert_ne!(\n hash_before_checkout, hash_modified,\n \"Different content should produce different hashes\"\n );\n }\n\n #[test]\n fn test_github_actions_ordinal_behavior_with_content_optimization() {\n // Test the complete GitHub Actions scenario:\n // 1. File processed with ordinal=1000, content_hash=ABC\n // 2. GitHub Actions checkout: ordinal=2000, content_hash=ABC (same content)\n // 3. 
Should use content hash optimization (update only tracking, skip evaluation)\n\n let original_processing = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original file timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n let after_github_checkout = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Step 1: Ordinal check should NOT skip (newer ordinal means potential processing needed)\n assert!(\n !original_processing.should_skip(&after_github_checkout, None),\n \"GitHub Actions: newer ordinal should not be skipped at ordinal level\"\n );\n\n // Step 2: Content hash optimization should trigger when content is same\n // This is tested in the integration level - the optimization path should:\n // - Compare content hashes\n // - If same: update only tracking info (process_ordinal, process_time)\n // - Skip expensive evaluation and target storage updates\n\n // Step 3: After optimization, tracking shows the new ordinal\n let after_optimization = SourceVersion {\n ordinal: Ordinal(Some(2000)), // Updated to new ordinal\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Future requests with same ordinal should be skipped\n assert!(\n after_optimization.should_skip(&after_github_checkout, None),\n \"After optimization, same ordinal should be skipped\"\n );\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/table_columns.rs", "use crate::{\n ops::sdk::SetupStateCompatibility,\n prelude::*,\n setup::{CombinedState, SetupChangeType},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TableColumnsSchema {\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"key_fields_schema\")]\n pub key_columns: IndexMap,\n\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"value_fields_schema\")]\n pub value_columns: IndexMap,\n}\n\n#[derive(Debug)]\npub enum TableUpsertionAction {\n Create {\n keys: IndexMap,\n values: IndexMap,\n },\n Update {\n columns_to_delete: 
IndexSet,\n columns_to_upsert: IndexMap,\n },\n}\n\nimpl TableUpsertionAction {\n pub fn is_empty(&self) -> bool {\n match self {\n Self::Create { .. } => false,\n Self::Update {\n columns_to_delete,\n columns_to_upsert,\n } => columns_to_delete.is_empty() && columns_to_upsert.is_empty(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct TableMainSetupAction {\n pub drop_existing: bool,\n pub table_upsertion: Option>,\n}\n\nimpl TableMainSetupAction {\n pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n\n let table_upsertion = if existing.always_exists() && !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n .flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n .collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n\n Self {\n drop_existing,\n table_upsertion: 
Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }\n\n pub fn describe_changes(&self) -> Vec\n where\n T: std::fmt::Display,\n {\n let mut descriptions = vec![];\n if self.drop_existing {\n descriptions.push(setup::ChangeDescription::Action(\"Drop table\".to_string()));\n }\n if let Some(table_upsertion) = &self.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create table:\\n key columns: {}\\n value columns: {}\\n\",\n keys.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n values.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n )));\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n if !columns_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete column from table: {}\",\n columns_to_delete.iter().join(\", \"),\n )));\n }\n if !columns_to_upsert.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Add / update columns in table: {}\",\n columns_to_upsert\n .iter()\n .map(|(k, v)| format!(\"{k} {v}\"))\n .join(\", \"),\n )));\n }\n }\n }\n }\n descriptions\n }\n\n pub fn change_type(&self, has_other_update: bool) -> SetupChangeType {\n match (self.drop_existing, &self.table_upsertion) {\n (_, Some(TableUpsertionAction::Create { .. })) => SetupChangeType::Create,\n (_, Some(TableUpsertionAction::Update { .. 
})) => SetupChangeType::Update,\n (true, None) => SetupChangeType::Delete,\n (false, None) => {\n if has_other_update {\n SetupChangeType::Update\n } else {\n SetupChangeType::NoChange\n }\n }\n }\n }\n}\n\npub fn check_table_compatibility(\n desired: &TableColumnsSchema,\n existing: &TableColumnsSchema,\n) -> SetupStateCompatibility {\n let is_key_identical = existing.key_columns == desired.key_columns;\n if is_key_identical {\n let is_value_lossy = existing\n .value_columns\n .iter()\n .any(|(k, v)| desired.value_columns.get(k) != Some(v));\n if is_value_lossy {\n SetupStateCompatibility::PartialCompatible\n } else {\n SetupStateCompatibility::Compatible\n }\n } else {\n SetupStateCompatibility::NotCompatible\n }\n}\n"], ["/cocoindex/src/ops/targets/kuzu.rs", "use chrono::TimeDelta;\nuse serde_json::json;\n\nuse std::fmt::Write;\n\nuse super::shared::property_graph::GraphElementMapping;\nuse super::shared::property_graph::*;\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::prelude::*;\n\nuse crate::setup::SetupChangeType;\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n /// The URL of the [Kuzu API server](https://kuzu.com/docs/api/server/overview),\n /// e.g. 
`http://localhost:8000`.\n api_server_url: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\n////////////////////////////////////////////////////////////\n// Utils to deal with Kuzu\n////////////////////////////////////////////////////////////\n\nstruct CypherBuilder {\n query: String,\n}\n\nimpl CypherBuilder {\n fn new() -> Self {\n Self {\n query: String::new(),\n }\n }\n\n fn query_mut(&mut self) -> &mut String {\n &mut self.query\n }\n}\n\nstruct KuzuThinClient {\n reqwest_client: reqwest::Client,\n query_url: String,\n}\n\nimpl KuzuThinClient {\n fn new(conn_spec: &ConnectionSpec, reqwest_client: reqwest::Client) -> Self {\n Self {\n reqwest_client,\n query_url: format!(\"{}/cypher\", conn_spec.api_server_url.trim_end_matches('/')),\n }\n }\n\n async fn run_cypher(&self, cyper_builder: CypherBuilder) -> Result<()> {\n if cyper_builder.query.is_empty() {\n return Ok(());\n }\n let query = json!({\n \"query\": cyper_builder.query\n });\n let response = self\n .reqwest_client\n .post(&self.query_url)\n .json(&query)\n .send()\n .await?;\n if !response.status().is_success() {\n return Err(anyhow::anyhow!(\n \"Failed to run cypher: {}\",\n response.text().await?\n ));\n }\n Ok(())\n }\n}\n\nfn kuzu_table_type(elem_type: &ElementType) -> &'static str {\n match elem_type {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"REL\",\n }\n}\n\nfn basic_type_to_kuzu(basic_type: &BasicValueType) -> Result {\n Ok(match basic_type {\n BasicValueType::Bytes => \"BLOB\".to_string(),\n BasicValueType::Str => \"STRING\".to_string(),\n BasicValueType::Bool => \"BOOL\".to_string(),\n BasicValueType::Int64 => \"INT64\".to_string(),\n BasicValueType::Float32 => \"FLOAT\".to_string(),\n BasicValueType::Float64 => 
\"DOUBLE\".to_string(),\n BasicValueType::Range => \"UINT64[2]\".to_string(),\n BasicValueType::Uuid => \"UUID\".to_string(),\n BasicValueType::Date => \"DATE\".to_string(),\n BasicValueType::LocalDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::OffsetDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::TimeDelta => \"INTERVAL\".to_string(),\n BasicValueType::Vector(t) => format!(\n \"{}[{}]\",\n basic_type_to_kuzu(&t.element_type)?,\n t.dimension\n .map_or_else(|| \"\".to_string(), |d| d.to_string())\n ),\n t @ (BasicValueType::Union(_) | BasicValueType::Time | BasicValueType::Json) => {\n api_bail!(\"{t} is not supported in Kuzu\")\n }\n })\n}\n\nfn struct_schema_to_kuzu(struct_schema: &StructSchema) -> Result {\n Ok(format!(\n \"STRUCT({})\",\n struct_schema\n .fields\n .iter()\n .map(|f| Ok(format!(\n \"{} {}\",\n f.name,\n value_type_to_kuzu(&f.value_type.typ)?\n )))\n .collect::>>()?\n .join(\", \")\n ))\n}\n\nfn value_type_to_kuzu(value_type: &ValueType) -> Result {\n Ok(match value_type {\n ValueType::Basic(basic_type) => basic_type_to_kuzu(basic_type)?,\n ValueType::Struct(struct_type) => struct_schema_to_kuzu(struct_type)?,\n ValueType::Table(table_type) => format!(\"{}[]\", struct_schema_to_kuzu(&table_type.row)?),\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]\nstruct ReferencedNodeTable {\n table_name: String,\n\n #[serde(with = \"indexmap::map::serde_seq\")]\n key_columns: IndexMap,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct SetupState {\n schema: TableColumnsSchema,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n referenced_node_tables: Option<(ReferencedNodeTable, ReferencedNodeTable)>,\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Borrowed(&val.schema)\n 
}\n}\n\n#[derive(Debug)]\nstruct GraphElementDataSetupStatus {\n actions: TableMainSetupAction,\n referenced_node_tables: Option<(String, String)>,\n drop_affected_referenced_node_tables: IndexSet,\n}\n\nimpl setup::ResourceSetupStatus for GraphElementDataSetupStatus {\n fn describe_changes(&self) -> Vec {\n self.actions.describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.actions.change_type(false)\n }\n}\n\nfn append_drop_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n if !setup_status.actions.drop_existing {\n return Ok(());\n }\n writeln!(\n cypher.query_mut(),\n \"DROP TABLE IF EXISTS {};\",\n elem_type.label()\n )?;\n Ok(())\n}\n\nfn append_delete_orphaned_nodes(cypher: &mut CypherBuilder, node_table: &str) -> Result<()> {\n writeln!(\n cypher.query_mut(),\n \"MATCH (n:{node_table}) WITH n WHERE NOT (n)--() DELETE n;\"\n )?;\n Ok(())\n}\n\nfn append_upsert_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n let table_upsertion = if let Some(table_upsertion) = &setup_status.actions.table_upsertion {\n table_upsertion\n } else {\n return Ok(());\n };\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n write!(\n cypher.query_mut(),\n \"CREATE {kuzu_table_type} TABLE IF NOT EXISTS {table_name} (\",\n kuzu_table_type = kuzu_table_type(elem_type),\n table_name = elem_type.label(),\n )?;\n if let Some((src, tgt)) = &setup_status.referenced_node_tables {\n write!(cypher.query_mut(), \"FROM {src} TO {tgt}, \")?;\n }\n cypher.query_mut().push_str(\n keys.iter()\n .chain(values.iter())\n .map(|(name, kuzu_type)| format!(\"{name} {kuzu_type}\"))\n .join(\", \")\n .as_str(),\n );\n match elem_type {\n ElementType::Node(_) => {\n write!(\n cypher.query_mut(),\n \", {SELF_CONTAINED_TAG_FIELD_NAME} BOOL, PRIMARY KEY ({})\",\n keys.iter().map(|(name, _)| 
name).join(\", \")\n )?;\n }\n ElementType::Relationship(_) => {}\n }\n write!(cypher.query_mut(), \");\\n\\n\")?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n let table_name = elem_type.label();\n for name in columns_to_delete\n .iter()\n .chain(columns_to_upsert.iter().map(|(name, _)| name))\n {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} DROP IF EXISTS {name};\"\n )?;\n }\n for (name, kuzu_type) in columns_to_upsert.iter() {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} ADD {name} {kuzu_type};\",\n )?;\n }\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Utils to convert value to Kuzu literals\n////////////////////////////////////////////////////////////\n\nfn append_string_literal(cypher: &mut CypherBuilder, s: &str) -> Result<()> {\n let out = cypher.query_mut();\n out.push('\"');\n for c in s.chars() {\n match c {\n '\\\\' => out.push_str(\"\\\\\\\\\"),\n '\"' => out.push_str(\"\\\\\\\"\"),\n // Control characters (0x00..=0x1F)\n c if (c as u32) < 0x20 => write!(out, \"\\\\u{:04X}\", c as u32)?,\n // BMP Unicode\n c if (c as u32) <= 0xFFFF => out.push(c),\n // Non-BMP Unicode: Encode as surrogate pairs for Cypher \\uXXXX\\uXXXX\n c => {\n let code = c as u32;\n let high = 0xD800 + ((code - 0x10000) >> 10);\n let low = 0xDC00 + ((code - 0x10000) & 0x3FF);\n write!(out, \"\\\\u{high:04X}\\\\u{low:04X}\")?;\n }\n }\n }\n out.push('\"');\n Ok(())\n}\n\nfn append_basic_value(cypher: &mut CypherBuilder, basic_value: &BasicValue) -> Result<()> {\n match basic_value {\n BasicValue::Bytes(bytes) => {\n write!(cypher.query_mut(), \"BLOB(\")?;\n for byte in bytes {\n write!(cypher.query_mut(), \"\\\\\\\\x{byte:02X}\")?;\n }\n write!(cypher.query_mut(), \")\")?;\n }\n BasicValue::Str(s) => {\n append_string_literal(cypher, s)?;\n }\n BasicValue::Bool(b) => {\n write!(cypher.query_mut(), \"{b}\")?;\n }\n BasicValue::Int64(i) => {\n 
write!(cypher.query_mut(), \"{i}\")?;\n }\n BasicValue::Float32(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Float64(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Range(r) => {\n write!(cypher.query_mut(), \"[{}, {}]\", r.start, r.end)?;\n }\n BasicValue::Uuid(u) => {\n write!(cypher.query_mut(), \"UUID(\\\"{u}\\\")\")?;\n }\n BasicValue::Date(d) => {\n write!(cypher.query_mut(), \"DATE(\\\"{d}\\\")\")?;\n }\n BasicValue::LocalDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::OffsetDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::TimeDelta(td) => {\n let num_days = td.num_days();\n let sub_day_duration = *td - TimeDelta::days(num_days);\n write!(cypher.query_mut(), \"INTERVAL(\\\"\")?;\n if num_days != 0 {\n write!(cypher.query_mut(), \"{num_days} days \")?;\n }\n let microseconds = sub_day_duration\n .num_microseconds()\n .ok_or_else(invariance_violation)?;\n write!(cypher.query_mut(), \"{microseconds} microseconds\\\")\")?;\n }\n BasicValue::Vector(v) => {\n write!(cypher.query_mut(), \"[\")?;\n let mut prefix = \"\";\n for elem in v.iter() {\n cypher.query_mut().push_str(prefix);\n append_basic_value(cypher, elem)?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"]\")?;\n }\n v @ (BasicValue::UnionVariant { .. 
} | BasicValue::Time(_) | BasicValue::Json(_)) => {\n bail!(\"value types are not supported in Kuzu: {}\", v.kind());\n }\n }\n Ok(())\n}\n\nfn append_struct_fields<'a>(\n cypher: &'a mut CypherBuilder,\n field_schema: &[schema::FieldSchema],\n field_values: impl Iterator,\n) -> Result<()> {\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(field_schema.iter(), field_values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v)?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_value(\n cypher: &mut CypherBuilder,\n typ: &schema::ValueType,\n value: &value::Value,\n) -> Result<()> {\n match value {\n value::Value::Null => {\n write!(cypher.query_mut(), \"NULL\")?;\n }\n value::Value::Basic(basic_value) => append_basic_value(cypher, basic_value)?,\n value::Value::Struct(struct_value) => {\n let struct_schema = match typ {\n schema::ValueType::Struct(struct_schema) => struct_schema,\n _ => {\n api_bail!(\"Expected struct type, got {}\", typ);\n }\n };\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &struct_schema.fields, struct_value.fields.iter())?;\n cypher.query_mut().push('}');\n }\n value::Value::KTable(map) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n cypher.query_mut().push('[');\n let mut prefix = \"\";\n for (k, v) in map.iter() {\n let key_value = value::Value::from(k);\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(\n cypher,\n &row_schema.fields,\n std::iter::once(&key_value).chain(v.fields.iter()),\n )?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n value::Value::LTable(rows) | value::Value::UTable(rows) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n 
cypher.query_mut().push('[');\n let mut prefix = \"\";\n for v in rows.iter() {\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &row_schema.fields, v.fields.iter())?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n conn_ref: AuthEntryReference,\n kuzu_client: KuzuThinClient,\n analyzed_data_coll: AnalyzedDataCollection,\n}\n\nfn append_key_pattern<'a>(\n cypher: &'a mut CypherBuilder,\n key_fields: &'a [FieldSchema],\n values: impl Iterator>,\n) -> Result<()> {\n write!(cypher.query_mut(), \"{{\")?;\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(key_fields.iter(), values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v.as_ref())?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"}}\")?;\n Ok(())\n}\n\nfn append_set_value_fields(\n cypher: &mut CypherBuilder,\n var_name: &str,\n value_fields: &[FieldSchema],\n value_fields_idx: &[usize],\n upsert_entry: &ExportTargetUpsertEntry,\n set_self_contained_tag: bool,\n) -> Result<()> {\n let mut prefix = \" SET \";\n if set_self_contained_tag {\n write!(\n cypher.query_mut(),\n \"{prefix}{var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE\"\n )?;\n prefix = \", \";\n }\n for (value_field, value_idx) in std::iter::zip(value_fields.iter(), value_fields_idx.iter()) {\n let field_name = &value_field.name;\n write!(cypher.query_mut(), \"{prefix}{var_name}.{field_name}=\")?;\n append_value(\n cypher,\n &value_field.value_type.typ,\n &upsert_entry.value.fields[*value_idx],\n )?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_upsert_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const 
NODE_VAR_NAME: &str = \"n\";\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({NODE_VAR_NAME}:{label} \",\n label = data_coll.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n NODE_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n true,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_merge_node_for_rel(\n cypher: &mut CypherBuilder,\n var_name: &str,\n field_mapping: &AnalyzedGraphElementFieldMapping,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({var_name}:{label} \",\n label = field_mapping.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &field_mapping.schema.key_fields,\n field_mapping\n .fields_input_idx\n .key\n .iter()\n .map(|idx| Cow::Borrowed(&upsert_entry.value.fields[*idx])),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n var_name,\n &field_mapping.schema.value_fields,\n &field_mapping.fields_input_idx.value,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut())?;\n Ok(())\n}\n\nfn append_upsert_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n const SRC_NODE_VAR_NAME: &str = \"s\";\n const TGT_NODE_VAR_NAME: &str = \"t\";\n\n let rel_info = if let Some(rel_info) = &data_coll.rel {\n rel_info\n } else {\n return Ok(());\n };\n append_merge_node_for_rel(cypher, SRC_NODE_VAR_NAME, &rel_info.source, upsert_entry)?;\n append_merge_node_for_rel(cypher, TGT_NODE_VAR_NAME, &rel_info.target, upsert_entry)?;\n {\n let rel_type = data_coll.schema.elem_type.label();\n write!(\n cypher.query_mut(),\n 
\"MERGE ({SRC_NODE_VAR_NAME})-[{REL_VAR_NAME}:{rel_type} \"\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \"]->({TGT_NODE_VAR_NAME})\")?;\n }\n append_set_value_fields(\n cypher,\n REL_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = data_coll.schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n key.fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} SET {NODE_VAR_NAME}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\"\n )?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n src_node_key: &KeyValue,\n tgt_node_key: &KeyValue,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n let rel_type = data_coll.schema.elem_type.label();\n\n write!(\n cypher.query_mut(),\n \"MATCH (:{label} \",\n label = rel.source.schema.elem_type.label()\n )?;\n let src_key_schema = &rel.source.schema.key_fields;\n append_key_pattern(\n cypher,\n src_key_schema,\n src_node_key\n .fields_iter(src_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n 
)?;\n\n write!(cypher.query_mut(), \")-[{REL_VAR_NAME}:{rel_type} \")?;\n let key_schema = &data_coll.schema.key_fields;\n append_key_pattern(\n cypher,\n key_schema,\n key.fields_iter(key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n\n write!(\n cypher.query_mut(),\n \"]->(:{label} \",\n label = rel.target.schema.elem_type.label()\n )?;\n let tgt_key_schema = &rel.target.schema.key_fields;\n append_key_pattern(\n cypher,\n tgt_key_schema,\n tgt_node_key\n .fields_iter(tgt_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n write!(cypher.query_mut(), \") DELETE {REL_VAR_NAME}\")?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_maybe_gc_node(\n cypher: &mut CypherBuilder,\n schema: &GraphElementSchema,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &schema.key_fields,\n key.fields_iter(schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n write!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\ntype KuzuGraphElement = GraphElementType;\n\nstruct Factory {\n reqwest_client: reqwest::Client,\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = GraphElementDataSetupStatus;\n\n type Key = KuzuGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Kuzu\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) 
-> Result<(\n Vec>,\n Vec<(KuzuGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n fn to_kuzu_cols(fields: &[FieldSchema]) -> Result> {\n fields\n .iter()\n .map(|f| Ok((f.name.clone(), value_type_to_kuzu(&f.value_type.typ)?)))\n .collect::>>()\n }\n let data_coll_outputs: Vec> =\n std::iter::zip(data_collections, analyzed_data_colls.into_iter())\n .map(|(data_coll, analyzed)| {\n fn to_dep_table(\n field_mapping: &AnalyzedGraphElementFieldMapping,\n ) -> Result {\n Ok(ReferencedNodeTable {\n table_name: field_mapping.schema.elem_type.label().to_string(),\n key_columns: to_kuzu_cols(&field_mapping.schema.key_fields)?,\n })\n }\n let setup_key = KuzuGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&analyzed.schema.key_fields)?,\n value_columns: to_kuzu_cols(&analyzed.schema.value_fields)?,\n },\n referenced_node_tables: (analyzed.rel.as_ref())\n .map(|rel| {\n anyhow::Ok((to_dep_table(&rel.source)?, to_dep_table(&rel.target)?))\n })\n .transpose()?,\n };\n\n let export_context = ExportContext {\n conn_ref: data_coll.spec.connection.clone(),\n kuzu_client: KuzuThinClient::new(\n &context\n .auth_registry\n .get::(&data_coll.spec.connection)?,\n self.reqwest_client.clone(),\n ),\n analyzed_data_coll: analyzed,\n };\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: async move { Ok(Arc::new(export_context)) }.boxed(),\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>()?;\n let decl_output = 
std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&graph_elem_schema.key_fields)?,\n value_columns: to_kuzu_cols(&graph_elem_schema.value_fields)?,\n },\n referenced_node_tables: None,\n };\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>()?;\n Ok((data_coll_outputs, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n _key: KuzuGraphElement,\n desired: Option,\n existing: CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let existing_invalidated = desired.as_ref().is_some_and(|desired| {\n existing\n .possible_versions()\n .any(|v| v.referenced_node_tables != desired.referenced_node_tables)\n });\n let actions =\n TableMainSetupAction::from_states(desired.as_ref(), &existing, existing_invalidated);\n let drop_affected_referenced_node_tables = if actions.drop_existing {\n existing\n .possible_versions()\n .flat_map(|v| &v.referenced_node_tables)\n .flat_map(|(src, tgt)| [src.table_name.clone(), tgt.table_name.clone()].into_iter())\n .collect()\n } else {\n IndexSet::new()\n };\n Ok(GraphElementDataSetupStatus {\n actions,\n referenced_node_tables: desired\n .and_then(|desired| desired.referenced_node_tables)\n .map(|(src, tgt)| (src.table_name, tgt.table_name)),\n drop_affected_referenced_node_tables,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(\n if desired.referenced_node_tables != existing.referenced_node_tables {\n SetupStateCompatibility::NotCompatible\n } else {\n check_table_compatibility(&desired.schema, &existing.schema)\n },\n )\n }\n\n fn describe_resource(&self, key: &KuzuGraphElement) -> Result {\n Ok(format!(\n \"Kuzu {} TABLE {}\",\n kuzu_table_type(&key.typ),\n key.typ.label()\n ))\n }\n\n fn 
extract_additional_key(\n &self,\n _key: &KeyValue,\n value: &FieldValues,\n export_context: &ExportContext,\n ) -> Result {\n let additional_key = if let Some(rel_info) = &export_context.analyzed_data_coll.rel {\n serde_json::to_value((\n (rel_info.source.fields_input_idx).extract_key(&value.fields)?,\n (rel_info.target.fields_input_idx).extract_key(&value.fields)?,\n ))?\n } else {\n serde_json::Value::Null\n };\n Ok(additional_key)\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mutations_by_conn = IndexMap::new();\n for mutation in mutations.into_iter() {\n mutations_by_conn\n .entry(mutation.export_context.conn_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mutations in mutations_by_conn.into_values() {\n let kuzu_client = &mutations[0].export_context.kuzu_client;\n let mut cypher = CypherBuilder::new();\n writeln!(cypher.query_mut(), \"BEGIN TRANSACTION;\")?;\n\n let (mut rel_mutations, nodes_mutations): (Vec<_>, Vec<_>) = mutations\n .into_iter()\n .partition(|m| m.export_context.analyzed_data_coll.rel.is_some());\n\n struct NodeTableGcInfo {\n schema: Arc,\n keys: IndexSet,\n }\n fn register_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: KeyValue,\n ) {\n map.entry(schema.elem_type.clone())\n .or_insert_with(|| NodeTableGcInfo {\n schema: schema.clone(),\n keys: IndexSet::new(),\n })\n .keys\n .insert(key);\n }\n fn resolve_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: &KeyValue,\n ) {\n map.get_mut(&schema.elem_type)\n .map(|info| info.keys.shift_remove(key));\n }\n let mut gc_info = IndexMap::::new();\n\n // Deletes for relationships\n for rel_mutation in rel_mutations.iter_mut() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n for delete in rel_mutation.mutation.deletes.iter_mut() {\n let mut additional_keys = match delete.additional_key.take() {\n serde_json::Value::Array(keys) => 
keys,\n _ => return Err(invariance_violation()),\n };\n if additional_keys.len() != 2 {\n api_bail!(\n \"Expected additional key with 2 fields, got {}\",\n delete.additional_key\n );\n }\n let src_key = KeyValue::from_json(\n additional_keys[0].take(),\n &rel.source.schema.key_fields,\n )?;\n let tgt_key = KeyValue::from_json(\n additional_keys[1].take(),\n &rel.target.schema.key_fields,\n )?;\n append_delete_rel(&mut cypher, data_coll, &delete.key, &src_key, &tgt_key)?;\n register_gc_node(&mut gc_info, &rel.source.schema, src_key);\n register_gc_node(&mut gc_info, &rel.target.schema, tgt_key);\n }\n }\n\n for node_mutation in nodes_mutations.iter() {\n let data_coll = &node_mutation.export_context.analyzed_data_coll;\n // Deletes for nodes\n for delete in node_mutation.mutation.deletes.iter() {\n append_delete_node(&mut cypher, data_coll, &delete.key)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &delete.key);\n }\n\n // Upserts for nodes\n for upsert in node_mutation.mutation.upserts.iter() {\n append_upsert_node(&mut cypher, data_coll, upsert)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &upsert.key);\n }\n }\n // Upserts for relationships\n for rel_mutation in rel_mutations.iter() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n for upsert in rel_mutation.mutation.upserts.iter() {\n append_upsert_rel(&mut cypher, data_coll, upsert)?;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n resolve_gc_node(\n &mut gc_info,\n &rel.source.schema,\n &(rel.source.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n resolve_gc_node(\n &mut gc_info,\n &rel.target.schema,\n &(rel.target.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n }\n }\n\n // GC orphaned nodes\n for info in gc_info.into_values() {\n for key in info.keys {\n append_maybe_gc_node(&mut cypher, &info.schema, &key)?;\n }\n }\n\n writeln!(cypher.query_mut(), \"COMMIT;\")?;\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n 
}\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n let mut changes_by_conn = IndexMap::new();\n for change in changes.into_iter() {\n changes_by_conn\n .entry(change.key.connection.clone())\n .or_insert_with(Vec::new)\n .push(change);\n }\n for (conn, changes) in changes_by_conn.into_iter() {\n let conn_spec = context.auth_registry.get::(&conn)?;\n let kuzu_client = KuzuThinClient::new(&conn_spec, self.reqwest_client.clone());\n\n let (node_changes, rel_changes): (Vec<_>, Vec<_>) =\n changes.into_iter().partition(|c| match &c.key.typ {\n ElementType::Node(_) => true,\n ElementType::Relationship(_) => false,\n });\n\n let mut partial_affected_node_tables = IndexSet::new();\n let mut cypher = CypherBuilder::new();\n // Relationships first when dropping.\n for change in rel_changes.iter().chain(node_changes.iter()) {\n if !change.setup_status.actions.drop_existing {\n continue;\n }\n append_drop_table(&mut cypher, change.setup_status, &change.key.typ)?;\n\n partial_affected_node_tables.extend(\n change\n .setup_status\n .drop_affected_referenced_node_tables\n .iter(),\n );\n if let ElementType::Node(label) = &change.key.typ {\n partial_affected_node_tables.swap_remove(label);\n }\n }\n // Nodes first when creating.\n for change in node_changes.iter().chain(rel_changes.iter()) {\n append_upsert_table(&mut cypher, change.setup_status, &change.key.typ)?;\n }\n\n for table in partial_affected_node_tables {\n append_delete_orphaned_nodes(&mut cypher, table)?;\n }\n\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n }\n}\n\npub fn register(\n registry: &mut ExecutorFactoryRegistry,\n reqwest_client: reqwest::Client,\n) -> Result<()> {\n Factory { reqwest_client }.register(registry)\n}\n"], ["/cocoindex/src/builder/exec_ctx.rs", "use crate::prelude::*;\n\nuse crate::execution::db_tracking_setup;\nuse crate::ops::get_executor_factory;\nuse crate::ops::interface::SetupStateCompatibility;\n\npub struct 
ImportOpExecutionContext {\n pub source_id: i32,\n}\n\npub struct ExportOpExecutionContext {\n pub target_id: i32,\n}\n\npub struct FlowSetupExecutionContext {\n pub setup_state: setup::FlowSetupState,\n pub import_ops: Vec,\n pub export_ops: Vec,\n}\n\npub struct AnalyzedTargetSetupState {\n pub target_kind: String,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n pub setup_by_user: bool,\n}\n\npub struct AnalyzedSetupState {\n pub targets: Vec,\n pub declarations: Vec,\n}\n\nfn build_import_op_exec_ctx(\n import_field_name: &spec::FieldName,\n import_op_output_type: &schema::EnrichedValueType,\n existing_source_states: Option<&Vec<&setup::SourceSetupState>>,\n metadata: &mut setup::FlowSetupMetadata,\n) -> Result {\n let key_schema_no_attrs = import_op_output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key\"))?\n .typ\n .without_attrs();\n\n let existing_source_ids = existing_source_states\n .iter()\n .flat_map(|v| v.iter())\n .filter_map(|state| {\n if state.key_schema == key_schema_no_attrs {\n Some(state.source_id)\n } else {\n None\n }\n })\n .collect::>();\n let source_id = if existing_source_ids.len() == 1 {\n existing_source_ids.into_iter().next().unwrap()\n } else {\n if existing_source_ids.len() > 1 {\n warn!(\"Multiple source states with the same key schema found\");\n }\n metadata.last_source_id += 1;\n metadata.last_source_id\n };\n metadata.sources.insert(\n import_field_name.clone(),\n setup::SourceSetupState {\n source_id,\n key_schema: key_schema_no_attrs,\n },\n );\n Ok(ImportOpExecutionContext { source_id })\n}\n\nfn build_target_id(\n analyzed_target_ss: &AnalyzedTargetSetupState,\n existing_target_states: &HashMap<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>,\n flow_setup_state: &mut setup::FlowSetupState,\n) -> Result {\n let interface::ExecutorFactory::ExportTarget(target_factory) =\n get_executor_factory(&analyzed_target_ss.target_kind)?\n else {\n 
api_bail!(\n \"`{}` is not a export target op\",\n analyzed_target_ss.target_kind\n )\n };\n\n let resource_id = setup::ResourceIdentifier {\n key: analyzed_target_ss.setup_key.clone(),\n target_kind: analyzed_target_ss.target_kind.clone(),\n };\n let existing_target_states = existing_target_states.get(&resource_id);\n let mut compatible_target_ids = HashSet::>::new();\n let mut reusable_schema_version_ids = HashSet::>::new();\n for existing_state in existing_target_states.iter().flat_map(|v| v.iter()) {\n let compatibility =\n if analyzed_target_ss.setup_by_user == existing_state.common.setup_by_user {\n target_factory.check_state_compatibility(\n &analyzed_target_ss.desired_setup_state,\n &existing_state.state,\n )?\n } else {\n SetupStateCompatibility::NotCompatible\n };\n let compatible_target_id = if compatibility != SetupStateCompatibility::NotCompatible {\n reusable_schema_version_ids.insert(\n (compatibility == SetupStateCompatibility::Compatible)\n .then_some(existing_state.common.schema_version_id),\n );\n Some(existing_state.common.target_id)\n } else {\n None\n };\n compatible_target_ids.insert(compatible_target_id);\n }\n\n let target_id = if compatible_target_ids.len() == 1 {\n compatible_target_ids.into_iter().next().flatten()\n } else {\n if compatible_target_ids.len() > 1 {\n warn!(\"Multiple target states with the same key schema found\");\n }\n None\n };\n let target_id = target_id.unwrap_or_else(|| {\n flow_setup_state.metadata.last_target_id += 1;\n flow_setup_state.metadata.last_target_id\n });\n let max_schema_version_id = existing_target_states\n .iter()\n .flat_map(|v| v.iter())\n .map(|s| s.common.max_schema_version_id)\n .max()\n .unwrap_or(0);\n let schema_version_id = if reusable_schema_version_ids.len() == 1 {\n reusable_schema_version_ids\n .into_iter()\n .next()\n .unwrap()\n .unwrap_or(max_schema_version_id + 1)\n } else {\n max_schema_version_id + 1\n };\n match flow_setup_state.targets.entry(resource_id) {\n 
indexmap::map::Entry::Occupied(entry) => {\n api_bail!(\n \"Target resource already exists: kind = {}, key = {}\",\n entry.key().target_kind,\n entry.key().key\n );\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(setup::TargetSetupState {\n common: setup::TargetSetupStateCommon {\n target_id,\n schema_version_id,\n max_schema_version_id: max_schema_version_id.max(schema_version_id),\n setup_by_user: analyzed_target_ss.setup_by_user,\n },\n state: analyzed_target_ss.desired_setup_state.clone(),\n });\n }\n }\n Ok(target_id)\n}\n\npub fn build_flow_setup_execution_context(\n flow_inst: &spec::FlowInstanceSpec,\n data_schema: &schema::FlowSchema,\n analyzed_ss: &AnalyzedSetupState,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result {\n let existing_metadata_versions = || {\n existing_flow_ss\n .iter()\n .flat_map(|flow_ss| flow_ss.metadata.possible_versions())\n };\n\n let mut source_states_by_name = HashMap::<&str, Vec<&setup::SourceSetupState>>::new();\n for metadata_version in existing_metadata_versions() {\n for (source_name, state) in metadata_version.sources.iter() {\n source_states_by_name\n .entry(source_name.as_str())\n .or_default()\n .push(state);\n }\n }\n\n let mut target_states_by_name_type =\n HashMap::<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>::new();\n for metadata_version in existing_flow_ss.iter() {\n for (resource_id, target) in metadata_version.targets.iter() {\n target_states_by_name_type\n .entry(resource_id)\n .or_default()\n .extend(target.possible_versions());\n }\n }\n\n let mut setup_state = setup::FlowSetupState:: {\n seen_flow_metadata_version: existing_flow_ss\n .and_then(|flow_ss| flow_ss.seen_flow_metadata_version),\n metadata: setup::FlowSetupMetadata {\n last_source_id: existing_metadata_versions()\n .map(|metadata| metadata.last_source_id)\n .max()\n .unwrap_or(0),\n last_target_id: existing_metadata_versions()\n .map(|metadata| metadata.last_target_id)\n .max()\n .unwrap_or(0),\n 
sources: BTreeMap::new(),\n },\n tracking_table: db_tracking_setup::TrackingTableSetupState {\n table_name: existing_flow_ss\n .and_then(|flow_ss| {\n flow_ss\n .tracking_table\n .current\n .as_ref()\n .map(|v| v.table_name.clone())\n })\n .unwrap_or_else(|| db_tracking_setup::default_tracking_table_name(&flow_inst.name)),\n version_id: db_tracking_setup::CURRENT_TRACKING_TABLE_VERSION,\n },\n targets: IndexMap::new(),\n };\n\n let import_op_exec_ctx = flow_inst\n .import_ops\n .iter()\n .map(|import_op| {\n let output_type = data_schema\n .root_op_scope\n .op_output_types\n .get(&import_op.name)\n .ok_or_else(invariance_violation)?;\n build_import_op_exec_ctx(\n &import_op.name,\n output_type,\n source_states_by_name.get(&import_op.name.as_str()),\n &mut setup_state.metadata,\n )\n })\n .collect::>>()?;\n\n let export_op_exec_ctx = analyzed_ss\n .targets\n .iter()\n .map(|analyzed_target_ss| {\n let target_id = build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n Ok(ExportOpExecutionContext { target_id })\n })\n .collect::>>()?;\n\n for analyzed_target_ss in analyzed_ss.declarations.iter() {\n build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n }\n\n Ok(FlowSetupExecutionContext {\n setup_state,\n import_ops: import_op_exec_ctx,\n export_ops: export_op_exec_ctx,\n })\n}\n"], ["/cocoindex/src/setup/components.rs", "use super::{CombinedState, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::prelude::*;\nuse std::fmt::Debug;\n\npub trait State: Debug + Send + Sync {\n fn key(&self) -> Key;\n}\n\n#[async_trait]\npub trait SetupOperator: 'static + Send + Sync {\n type Key: Debug + Hash + Eq + Clone + Send + Sync;\n type State: State;\n type SetupState: Send + Sync + IntoIterator;\n type Context: Sync;\n\n fn describe_key(&self, key: &Self::Key) -> String;\n\n fn describe_state(&self, state: &Self::State) -> String;\n\n fn is_up_to_date(&self, current: &Self::State, 
desired: &Self::State) -> bool;\n\n async fn create(&self, state: &Self::State, context: &Self::Context) -> Result<()>;\n\n async fn delete(&self, key: &Self::Key, context: &Self::Context) -> Result<()>;\n\n async fn update(&self, state: &Self::State, context: &Self::Context) -> Result<()> {\n self.delete(&state.key(), context).await?;\n self.create(state, context).await\n }\n}\n\n#[derive(Debug)]\nstruct CompositeStateUpsert {\n state: S,\n already_exists: bool,\n}\n\n#[derive(Derivative)]\n#[derivative(Debug)]\npub struct SetupStatus {\n #[derivative(Debug = \"ignore\")]\n desc: D,\n keys_to_delete: IndexSet,\n states_to_upsert: Vec>,\n}\n\nimpl SetupStatus {\n pub fn create(\n desc: D,\n desired: Option,\n existing: CombinedState,\n ) -> Result {\n let existing_component_states = CombinedState {\n current: existing.current.map(|s| {\n s.into_iter()\n .map(|s| (s.key(), s))\n .collect::>()\n }),\n staging: existing\n .staging\n .into_iter()\n .map(|s| match s {\n StateChange::Delete => StateChange::Delete,\n StateChange::Upsert(s) => {\n StateChange::Upsert(s.into_iter().map(|s| (s.key(), s)).collect())\n }\n })\n .collect(),\n legacy_state_key: existing.legacy_state_key,\n };\n let mut keys_to_delete = IndexSet::new();\n let mut states_to_upsert = vec![];\n\n // Collect all existing component keys\n for c in existing_component_states.possible_versions() {\n keys_to_delete.extend(c.keys().cloned());\n }\n\n if let Some(desired_state) = desired {\n for desired_comp_state in desired_state {\n let key = desired_comp_state.key();\n\n // Remove keys that should be kept from deletion list\n keys_to_delete.shift_remove(&key);\n\n // Add components that need to be updated\n let is_up_to_date = existing_component_states.always_exists()\n && existing_component_states.possible_versions().all(|v| {\n v.get(&key)\n .is_some_and(|s| desc.is_up_to_date(s, &desired_comp_state))\n });\n if !is_up_to_date {\n let already_exists = existing_component_states\n .possible_versions()\n 
.any(|v| v.contains_key(&key));\n states_to_upsert.push(CompositeStateUpsert {\n state: desired_comp_state,\n already_exists,\n });\n }\n }\n }\n\n Ok(Self {\n desc,\n keys_to_delete,\n states_to_upsert,\n })\n }\n}\n\nimpl ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n\n for key in &self.keys_to_delete {\n result.push(setup::ChangeDescription::Action(format!(\n \"Delete {}\",\n self.desc.describe_key(key)\n )));\n }\n\n for state in &self.states_to_upsert {\n result.push(setup::ChangeDescription::Action(format!(\n \"{} {}\",\n if state.already_exists {\n \"Update\"\n } else {\n \"Create\"\n },\n self.desc.describe_state(&state.state)\n )));\n }\n\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.keys_to_delete.is_empty() && self.states_to_upsert.is_empty() {\n SetupChangeType::NoChange\n } else if self.keys_to_delete.is_empty() {\n SetupChangeType::Create\n } else if self.states_to_upsert.is_empty() {\n SetupChangeType::Delete\n } else {\n SetupChangeType::Update\n }\n }\n}\n\npub async fn apply_component_changes(\n changes: Vec<&SetupStatus>,\n context: &D::Context,\n) -> Result<()> {\n // First delete components that need to be removed\n for change in changes.iter() {\n for key in &change.keys_to_delete {\n change.desc.delete(key, context).await?;\n }\n }\n\n // Then upsert components that need to be updated\n for change in changes.iter() {\n for state in &change.states_to_upsert {\n if state.already_exists {\n change.desc.update(&state.state, context).await?;\n } else {\n change.desc.create(&state.state, context).await?;\n }\n }\n }\n\n Ok(())\n}\n\nimpl ResourceSetupStatus for (A, B) {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n result.extend(self.0.describe_changes());\n result.extend(self.1.describe_changes());\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.0.change_type(), self.1.change_type()) {\n (SetupChangeType::Invalid, _) | 
(_, SetupChangeType::Invalid) => {\n SetupChangeType::Invalid\n }\n (SetupChangeType::NoChange, b) => b,\n (a, _) => a,\n }\n }\n}\n"], ["/cocoindex/src/ops/targets/qdrant.rs", "use crate::ops::sdk::*;\nuse crate::prelude::*;\n\nuse std::fmt::Display;\n\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::setup;\nuse qdrant_client::Qdrant;\nuse qdrant_client::qdrant::{\n CreateCollectionBuilder, DeletePointsBuilder, DenseVector, Distance, MultiDenseVector,\n MultiVectorComparator, MultiVectorConfigBuilder, NamedVectors, PointId, PointStruct,\n PointsIdsList, UpsertPointsBuilder, Value as QdrantValue, Vector as QdrantVector,\n VectorParamsBuilder, VectorsConfigBuilder,\n};\n\nconst DEFAULT_VECTOR_SIMILARITY_METRIC: spec::VectorSimilarityMetric =\n spec::VectorSimilarityMetric::CosineSimilarity;\nconst DEFAULT_URL: &str = \"http://localhost:6334/\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n grpc_url: String,\n api_key: Option,\n}\n\n#[derive(Debug, Deserialize, Clone)]\nstruct Spec {\n connection: Option>,\n collection_name: String,\n}\n\n////////////////////////////////////////////////////////////\n// Common\n////////////////////////////////////////////////////////////\n\nstruct FieldInfo {\n field_schema: schema::FieldSchema,\n vector_shape: Option,\n}\n\nenum VectorShape {\n Vector(usize),\n MultiVector(usize),\n}\n\nimpl VectorShape {\n fn vector_size(&self) -> usize {\n match self {\n VectorShape::Vector(size) => *size,\n VectorShape::MultiVector(size) => *size,\n }\n }\n\n fn multi_vector_comparator(&self) -> Option {\n match self {\n VectorShape::MultiVector(_) => Some(MultiVectorComparator::MaxSim),\n _ => None,\n }\n }\n}\n\nfn parse_vector_schema_shape(vector_schema: &schema::VectorTypeSchema) -> Option {\n match &*vector_schema.element_type {\n 
schema::BasicValueType::Float32\n | schema::BasicValueType::Float64\n | schema::BasicValueType::Int64 => vector_schema.dimension.map(VectorShape::Vector),\n\n schema::BasicValueType::Vector(nested_vector_schema) => {\n match parse_vector_schema_shape(nested_vector_schema) {\n Some(VectorShape::Vector(dim)) => Some(VectorShape::MultiVector(dim)),\n _ => None,\n }\n }\n _ => None,\n }\n}\n\nfn parse_vector_shape(typ: &schema::ValueType) -> Option {\n match typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(vector_schema)) => {\n parse_vector_schema_shape(vector_schema)\n }\n _ => None,\n }\n}\n\nfn encode_dense_vector(v: &BasicValue) -> Result {\n let vec = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(|elem| {\n Ok(match elem {\n BasicValue::Float32(f) => *f,\n BasicValue::Float64(f) => *f as f32,\n BasicValue::Int64(i) => *i as f32,\n _ => bail!(\"Unsupported vector type: {:?}\", elem.kind()),\n })\n })\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vec.into())\n}\n\nfn encode_multi_dense_vector(v: &BasicValue) -> Result {\n let vecs = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(encode_dense_vector)\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vecs.into())\n}\n\nfn embedding_metric_to_qdrant(metric: spec::VectorSimilarityMetric) -> Result {\n Ok(match metric {\n spec::VectorSimilarityMetric::CosineSimilarity => Distance::Cosine,\n spec::VectorSimilarityMetric::L2Distance => Distance::Euclid,\n spec::VectorSimilarityMetric::InnerProduct => Distance::Dot,\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionKey {\n connection: Option>,\n collection_name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nstruct VectorDef {\n vector_size: usize,\n 
metric: spec::VectorSimilarityMetric,\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n multi_vector_comparator: Option,\n}\n#[derive(Debug, Clone, Serialize, Deserialize)]\nstruct SetupState {\n #[serde(default)]\n vectors: BTreeMap,\n\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n unsupported_vector_fields: Vec<(String, ValueType)>,\n}\n\n#[derive(Debug)]\nstruct SetupStatus {\n delete_collection: bool,\n add_collection: Option,\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if self.delete_collection {\n result.push(setup::ChangeDescription::Action(\n \"Delete collection\".to_string(),\n ));\n }\n if let Some(add_collection) = &self.add_collection {\n let vector_descriptions = add_collection\n .vectors\n .iter()\n .map(|(name, vector_def)| {\n format!(\n \"{}[{}], {}\",\n name, vector_def.vector_size, vector_def.metric\n )\n })\n .collect::>()\n .join(\"; \");\n result.push(setup::ChangeDescription::Action(format!(\n \"Create collection{}\",\n if vector_descriptions.is_empty() {\n \"\".to_string()\n } else {\n format!(\" with vectors: {vector_descriptions}\")\n }\n )));\n for (name, schema) in add_collection.unsupported_vector_fields.iter() {\n result.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by Qdrant. 
It will be stored in payload.\",\n name, schema\n )));\n }\n }\n result\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n match (self.delete_collection, self.add_collection.is_some()) {\n (false, false) => setup::SetupChangeType::NoChange,\n (false, true) => setup::SetupChangeType::Create,\n (true, false) => setup::SetupChangeType::Delete,\n (true, true) => setup::SetupChangeType::Update,\n }\n }\n}\n\nimpl SetupStatus {\n async fn apply_delete(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if self.delete_collection {\n qdrant_client.delete_collection(collection_name).await?;\n }\n Ok(())\n }\n\n async fn apply_create(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if let Some(add_collection) = &self.add_collection {\n let mut builder = CreateCollectionBuilder::new(collection_name);\n if !add_collection.vectors.is_empty() {\n let mut vectors_config = VectorsConfigBuilder::default();\n for (name, vector_def) in add_collection.vectors.iter() {\n let mut params = VectorParamsBuilder::new(\n vector_def.vector_size as u64,\n embedding_metric_to_qdrant(vector_def.metric)?,\n );\n if let Some(multi_vector_comparator) = &vector_def.multi_vector_comparator {\n params = params.multivector_config(MultiVectorConfigBuilder::new(\n MultiVectorComparator::from_str_name(multi_vector_comparator)\n .ok_or_else(|| {\n anyhow!(\n \"unrecognized multi vector comparator: {}\",\n multi_vector_comparator\n )\n })?,\n ));\n }\n vectors_config.add_named_vector_params(name, params);\n }\n builder = builder.vectors_config(vectors_config);\n }\n qdrant_client.create_collection(builder).await?;\n }\n Ok(())\n }\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n qdrant_client: Arc,\n collection_name: String,\n fields_info: Vec,\n}\n\nimpl ExportContext {\n async fn apply_mutation(&self, mutation: 
ExportTargetMutation) -> Result<()> {\n let mut points: Vec = Vec::with_capacity(mutation.upserts.len());\n for upsert in mutation.upserts.iter() {\n let point_id = key_to_point_id(&upsert.key)?;\n let (payload, vectors) = values_to_payload(&upsert.value.fields, &self.fields_info)?;\n\n points.push(PointStruct::new(point_id, vectors, payload));\n }\n\n if !points.is_empty() {\n self.qdrant_client\n .upsert_points(UpsertPointsBuilder::new(&self.collection_name, points).wait(true))\n .await?;\n }\n\n let ids = mutation\n .deletes\n .iter()\n .map(|deletion| key_to_point_id(&deletion.key))\n .collect::>>()?;\n\n if !ids.is_empty() {\n self.qdrant_client\n .delete_points(\n DeletePointsBuilder::new(&self.collection_name)\n .points(PointsIdsList { ids })\n .wait(true),\n )\n .await?;\n }\n\n Ok(())\n }\n}\nfn key_to_point_id(key_value: &KeyValue) -> Result {\n let point_id = match key_value {\n KeyValue::Str(v) => PointId::from(v.to_string()),\n KeyValue::Int64(v) => PointId::from(*v as u64),\n KeyValue::Uuid(v) => PointId::from(v.to_string()),\n e => bail!(\"Invalid Qdrant point ID: {e}\"),\n };\n\n Ok(point_id)\n}\n\nfn values_to_payload(\n value_fields: &[Value],\n fields_info: &[FieldInfo],\n) -> Result<(HashMap, NamedVectors)> {\n let mut payload = HashMap::with_capacity(value_fields.len());\n let mut vectors = NamedVectors::default();\n\n for (value, field_info) in value_fields.iter().zip(fields_info.iter()) {\n let field_name = &field_info.field_schema.name;\n\n match &field_info.vector_shape {\n Some(vector_shape) => {\n if value.is_null() {\n continue;\n }\n let vector: QdrantVector = match value {\n Value::Basic(basic_value) => match vector_shape {\n VectorShape::Vector(_) => encode_dense_vector(&basic_value)?.into(),\n VectorShape::MultiVector(_) => {\n encode_multi_dense_vector(&basic_value)?.into()\n }\n },\n _ => {\n bail!(\"Expected a vector field, got {:?}\", value);\n }\n };\n vectors = vectors.add_vector(field_name.clone(), vector);\n }\n None => {\n 
let json_value = serde_json::to_value(TypedValue {\n t: &field_info.field_schema.value_type.typ,\n v: value,\n })?;\n payload.insert(field_name.clone(), json_value.into());\n }\n }\n }\n\n Ok((payload, vectors))\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\n#[derive(Default)]\nstruct Factory {\n qdrant_clients: Mutex>, Arc>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionId {\n collection_name: String,\n}\n\nimpl Display for CollectionId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.collection_name)?;\n Ok(())\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = CollectionKey;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Qdrant\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(CollectionKey, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n if d.key_fields_schema.len() != 1 {\n api_bail!(\n \"Expected one primary key field for the point ID. 
Got {}.\",\n d.key_fields_schema.len()\n )\n }\n\n let mut fields_info = Vec::::new();\n let mut vector_def = BTreeMap::::new();\n let mut unsupported_vector_fields = Vec::<(String, ValueType)>::new();\n\n for field in d.value_fields_schema.iter() {\n let vector_shape = parse_vector_shape(&field.value_type.typ);\n if let Some(vector_shape) = &vector_shape {\n vector_def.insert(\n field.name.clone(),\n VectorDef {\n vector_size: vector_shape.vector_size(),\n metric: DEFAULT_VECTOR_SIMILARITY_METRIC,\n multi_vector_comparator: vector_shape.multi_vector_comparator().map(|s| s.as_str_name().to_string()),\n },\n );\n } else if matches!(\n &field.value_type.typ,\n schema::ValueType::Basic(schema::BasicValueType::Vector(_))\n ) {\n // This is a vector field but not supported by Qdrant\n unsupported_vector_fields.push((field.name.clone(), field.value_type.typ.clone()));\n }\n fields_info.push(FieldInfo {\n field_schema: field.clone(),\n vector_shape,\n });\n }\n\n let mut specified_vector_fields = HashSet::new();\n for vector_index in d.index_options.vector_indexes {\n match vector_def.get_mut(&vector_index.field_name) {\n Some(vector_def) => {\n if specified_vector_fields.insert(vector_index.field_name.clone()) {\n // Validate the metric is supported by Qdrant\n embedding_metric_to_qdrant(vector_index.metric)\n .with_context(||\n format!(\"Parsing vector index metric {} for field `{}`\", vector_index.metric, vector_index.field_name))?;\n vector_def.metric = vector_index.metric;\n } else {\n api_bail!(\"Field `{}` specified more than once in vector index definition\", vector_index.field_name);\n }\n }\n None => {\n if let Some(field) = d.value_fields_schema.iter().find(|f| f.name == vector_index.field_name) {\n api_bail!(\n \"Field `{}` specified in vector index is expected to be a number vector with fixed size, actual type: {}\",\n vector_index.field_name, field.value_type.typ\n );\n } else {\n api_bail!(\"Field `{}` specified in vector index is not found\", 
vector_index.field_name);\n }\n }\n }\n }\n\n let export_context = Arc::new(ExportContext {\n qdrant_client: self\n .get_qdrant_client(&d.spec.connection, &context.auth_registry)?,\n collection_name: d.spec.collection_name.clone(),\n fields_info,\n });\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: Box::pin(async move { Ok(export_context) }),\n setup_key: CollectionKey {\n connection: d.spec.connection,\n collection_name: d.spec.collection_name,\n },\n desired_setup_state: SetupState {\n vectors: vector_def,\n unsupported_vector_fields,\n },\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(match key {\n serde_json::Value::String(s) => {\n // For backward compatibility.\n CollectionKey {\n collection_name: s,\n connection: None,\n }\n }\n _ => serde_json::from_value(key)?,\n })\n }\n\n async fn check_setup_status(\n &self,\n _key: CollectionKey,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let desired_exists = desired.is_some();\n let add_collection = desired.filter(|state| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vectors != state.vectors)\n });\n let delete_collection = existing.possible_versions().next().is_some()\n && (!desired_exists || add_collection.is_some());\n Ok(SetupStatus {\n delete_collection,\n add_collection,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(if desired.vectors == existing.vectors {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n })\n }\n\n fn describe_resource(&self, key: &CollectionKey) -> Result {\n Ok(format!(\n \"Qdrant collection {}{}\",\n key.collection_name,\n key.connection\n .as_ref()\n .map_or_else(|| \"\".to_string(), |auth_entry| format!(\" @ {auth_entry}\"))\n ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: 
Vec>,\n ) -> Result<()> {\n for mutation_w_ctx in mutations.into_iter() {\n mutation_w_ctx\n .export_context\n .apply_mutation(mutation_w_ctx.mutation)\n .await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_delete(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_create(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n Ok(())\n }\n}\n\nimpl Factory {\n fn new() -> Self {\n Self {\n qdrant_clients: Mutex::new(HashMap::new()),\n }\n }\n\n fn get_qdrant_client(\n &self,\n auth_entry: &Option>,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let mut clients = self.qdrant_clients.lock().unwrap();\n if let Some(client) = clients.get(auth_entry) {\n return Ok(client.clone());\n }\n\n let spec = auth_entry.as_ref().map_or_else(\n || {\n Ok(ConnectionSpec {\n grpc_url: DEFAULT_URL.to_string(),\n api_key: None,\n })\n },\n |auth_entry| auth_registry.get(auth_entry),\n )?;\n let client = Arc::new(\n Qdrant::from_url(&spec.grpc_url)\n .api_key(spec.api_key)\n .skip_compatibility_check()\n .build()?,\n );\n clients.insert(auth_entry.clone(), client.clone());\n Ok(client)\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory::new().register(registry)\n}\n"], ["/cocoindex/src/execution/db_tracking.rs", "use crate::prelude::*;\n\nuse super::{db_tracking_setup::TrackingTableSetupState, memoization::StoredMemoizationInfo};\nuse crate::utils::{db::WriteAction, fingerprint::Fingerprint};\nuse futures::Stream;\nuse serde::de::{self, Deserializer, SeqAccess, Visitor};\nuse 
serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::fmt;\n\n#[derive(Debug, Clone)]\npub struct TrackedTargetKeyInfo {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n pub process_ordinal: i64,\n pub fingerprint: Option,\n}\n\nimpl Serialize for TrackedTargetKeyInfo {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(None)?;\n seq.serialize_element(&self.key)?;\n seq.serialize_element(&self.process_ordinal)?;\n seq.serialize_element(&self.fingerprint)?;\n if !self.additional_key.is_null() {\n seq.serialize_element(&self.additional_key)?;\n }\n seq.end()\n }\n}\n\nimpl<'de> serde::Deserialize<'de> for TrackedTargetKeyInfo {\n fn deserialize(deserializer: D) -> Result\n where\n D: Deserializer<'de>,\n {\n struct TrackedTargetKeyVisitor;\n\n impl<'de> Visitor<'de> for TrackedTargetKeyVisitor {\n type Value = TrackedTargetKeyInfo;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"a sequence of 3 or 4 elements for TrackedTargetKey\")\n }\n\n fn visit_seq(self, mut seq: A) -> Result\n where\n A: SeqAccess<'de>,\n {\n let target_key: serde_json::Value = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n let process_ordinal: i64 = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(1, &self))?;\n let fingerprint: Option = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(2, &self))?;\n let additional_key: Option = seq.next_element()?;\n\n Ok(TrackedTargetKeyInfo {\n key: target_key,\n process_ordinal,\n fingerprint,\n additional_key: additional_key.unwrap_or(serde_json::Value::Null),\n })\n }\n }\n\n deserializer.deserialize_seq(TrackedTargetKeyVisitor)\n }\n}\n\n/// (source_id, target_key)\npub type TrackedTargetKeyForSource = Vec<(i32, Vec)>;\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForProcessing {\n pub memoization_info: Option>>,\n\n pub 
processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub max_process_ordinal: Option,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_processing(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT memoization_info, processed_source_ordinal, process_logic_fingerprint, max_process_ordinal, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n\n Ok(tracking_info)\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForPrecommit {\n pub max_process_ordinal: i64,\n pub staging_target_keys: sqlx::types::Json,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_ordinal: Option,\n pub target_keys: Option>,\n}\n\npub async fn read_source_tracking_info_for_precommit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT max_process_ordinal, staging_target_keys, processed_source_ordinal, process_logic_fingerprint, process_ordinal, target_keys FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let precommit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n\n Ok(precommit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n max_process_ordinal: i64,\n staging_target_keys: TrackedTargetKeyForSource,\n memoization_info: Option<&StoredMemoizationInfo>,\n db_setup: &TrackingTableSetupState,\n db_executor: impl 
sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} (source_id, source_key, max_process_ordinal, staging_target_keys, memoization_info) VALUES ($1, $2, $3, $4, $5)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET max_process_ordinal = $3, staging_target_keys = $4, memoization_info = $5 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(max_process_ordinal) // $3\n .bind(sqlx::types::Json(staging_target_keys)) // $4\n .bind(memoization_info.map(sqlx::types::Json)) // $5\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForCommit {\n pub staging_target_keys: sqlx::types::Json,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_commit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT staging_target_keys, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let commit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n Ok(commit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n staging_target_keys: TrackedTargetKeyForSource,\n processed_source_ordinal: Option,\n logic_fingerprint: &[u8],\n process_ordinal: i64,\n process_time_micros: i64,\n target_keys: TrackedTargetKeyForSource,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> 
{\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} ( \\\n source_id, source_key, \\\n max_process_ordinal, staging_target_keys, \\\n processed_source_ordinal, process_logic_fingerprint, process_ordinal, process_time_micros, target_keys) \\\n VALUES ($1, $2, $6 + 1, $3, $4, $5, $6, $7, $8)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET staging_target_keys = $3, processed_source_ordinal = $4, process_logic_fingerprint = $5, process_ordinal = $6, process_time_micros = $7, target_keys = $8 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(sqlx::types::Json(staging_target_keys)) // $3\n .bind(processed_source_ordinal) // $4\n .bind(logic_fingerprint) // $5\n .bind(process_ordinal) // $6\n .bind(process_time_micros) // $7\n .bind(sqlx::types::Json(target_keys)) // $8\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub async fn delete_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct TrackedSourceKeyMetadata {\n pub source_key: serde_json::Value,\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n}\n\npub struct ListTrackedSourceKeyMetadataState {\n query_str: String,\n}\n\nimpl ListTrackedSourceKeyMetadataState {\n pub fn new() -> Self {\n Self {\n query_str: String::new(),\n }\n }\n\n pub fn list<'a>(\n &'a mut self,\n source_id: i32,\n db_setup: &'a TrackingTableSetupState,\n pool: &'a PgPool,\n ) -> impl Stream> + 'a {\n 
self.query_str = format!(\n \"SELECT source_key, processed_source_ordinal, process_logic_fingerprint FROM {} WHERE source_id = $1\",\n db_setup.table_name\n );\n sqlx::query_as(&self.query_str).bind(source_id).fetch(pool)\n }\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceLastProcessedInfo {\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_time_micros: Option,\n}\n\npub async fn read_source_last_processed_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT processed_source_ordinal, process_logic_fingerprint, process_time_micros FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let last_processed_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n Ok(last_processed_info)\n}\n\npub async fn update_source_tracking_ordinal(\n source_id: i32,\n source_key_json: &serde_json::Value,\n processed_source_ordinal: Option,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"UPDATE {} SET processed_source_ordinal = $3 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(processed_source_ordinal) // $3\n .execute(db_executor)\n .await?;\n Ok(())\n}\n"], ["/cocoindex/src/ops/py_factory.rs", "use crate::prelude::*;\n\nuse pyo3::{\n IntoPyObjectExt, Py, PyAny, Python, pyclass, pymethods,\n types::{IntoPyDict, PyList, PyString, PyTuple},\n};\nuse pythonize::{depythonize, pythonize};\n\nuse crate::{\n base::{schema, value},\n builder::plan,\n ops::sdk::SetupStateCompatibility,\n py::{self, ToResultWithPyTrace},\n};\nuse anyhow::{Result, anyhow};\n\n#[pyclass(name = \"OpArgSchema\")]\npub struct PyOpArgSchema {\n 
value_type: crate::py::Pythonized,\n analyzed_value: crate::py::Pythonized,\n}\n\n#[pymethods]\nimpl PyOpArgSchema {\n #[getter]\n fn value_type(&self) -> &crate::py::Pythonized {\n &self.value_type\n }\n\n #[getter]\n fn analyzed_value(&self) -> &crate::py::Pythonized {\n &self.analyzed_value\n }\n}\n\nstruct PyFunctionExecutor {\n py_function_executor: Py,\n py_exec_ctx: Arc,\n\n num_positional_args: usize,\n kw_args_names: Vec>,\n result_type: schema::EnrichedValueType,\n\n enable_cache: bool,\n behavior_version: Option,\n}\n\nimpl PyFunctionExecutor {\n fn call_py_fn<'py>(\n &self,\n py: Python<'py>,\n input: Vec,\n ) -> Result> {\n let mut args = Vec::with_capacity(self.num_positional_args);\n for v in input[0..self.num_positional_args].iter() {\n args.push(py::value_to_py_object(py, v)?);\n }\n\n let kwargs = if self.kw_args_names.is_empty() {\n None\n } else {\n let mut kwargs = Vec::with_capacity(self.kw_args_names.len());\n for (name, v) in self\n .kw_args_names\n .iter()\n .zip(input[self.num_positional_args..].iter())\n {\n kwargs.push((name.bind(py), py::value_to_py_object(py, v)?));\n }\n Some(kwargs)\n };\n\n let result = self\n .py_function_executor\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n kwargs\n .map(|kwargs| -> Result<_> { Ok(kwargs.into_py_dict(py)?) 
})\n .transpose()?\n .as_ref(),\n )\n .to_result_with_py_trace(py)?;\n Ok(result.into_bound(py))\n }\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionExecutor for Arc {\n async fn evaluate(&self, input: Vec) -> Result {\n let self = self.clone();\n let result_fut = Python::with_gil(|py| -> Result<_> {\n let result_coro = self.call_py_fn(py, input)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(self.py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro,\n )?)\n })?;\n let result = result_fut.await;\n Python::with_gil(|py| -> Result<_> {\n let result = result.to_result_with_py_trace(py)?;\n Ok(py::value_from_py_object(\n &self.result_type.typ,\n &result.into_bound(py),\n )?)\n })\n }\n\n fn enable_cache(&self) -> bool {\n self.enable_cache\n }\n\n fn behavior_version(&self) -> Option {\n self.behavior_version\n }\n}\n\npub(crate) struct PyFunctionFactory {\n pub py_function_factory: Py,\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionFactory for PyFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n schema::EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let (result_type, executor, kw_args_names, num_positional_args) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let mut args = vec![pythonize(py, &spec)?];\n let mut kwargs = vec![];\n let mut num_positional_args = 0;\n for arg in input_schema.into_iter() {\n let py_arg_schema = PyOpArgSchema {\n value_type: crate::py::Pythonized(arg.value_type.clone()),\n analyzed_value: crate::py::Pythonized(arg.analyzed_value.clone()),\n };\n match arg.name.0 {\n Some(name) => {\n kwargs.push((name.clone(), py_arg_schema));\n }\n None => {\n args.push(py_arg_schema.into_bound_py_any(py)?);\n num_positional_args += 1;\n }\n }\n }\n\n let kw_args_names = kwargs\n .iter()\n .map(|(name, _)| PyString::new(py, name).unbind())\n .collect::>();\n let result = 
self\n .py_function_factory\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n Some(&kwargs.into_py_dict(py)?),\n )\n .to_result_with_py_trace(py)?;\n let (result_type, executor) = result\n .extract::<(crate::py::Pythonized, Py)>(py)?;\n Ok((\n result_type.into_inner(),\n executor,\n kw_args_names,\n num_positional_args,\n ))\n })?;\n\n let executor_fut = {\n let result_type = result_type.clone();\n async move {\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let (prepare_fut, enable_cache, behavior_version) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let prepare_coro = executor\n .call_method(py, \"prepare\", (), None)\n .to_result_with_py_trace(py)?;\n let prepare_fut = pyo3_async_runtimes::into_future_with_locals(\n &pyo3_async_runtimes::TaskLocals::new(\n py_exec_ctx.event_loop.bind(py).clone(),\n ),\n prepare_coro.into_bound(py),\n )?;\n let enable_cache = executor\n .call_method(py, \"enable_cache\", (), None)\n .to_result_with_py_trace(py)?\n .extract::(py)?;\n let behavior_version = executor\n .call_method(py, \"behavior_version\", (), None)\n .to_result_with_py_trace(py)?\n .extract::>(py)?;\n Ok((prepare_fut, enable_cache, behavior_version))\n })?;\n prepare_fut.await?;\n Ok(Box::new(Arc::new(PyFunctionExecutor {\n py_function_executor: executor,\n py_exec_ctx,\n num_positional_args,\n kw_args_names,\n result_type,\n enable_cache,\n behavior_version,\n }))\n as Box)\n }\n };\n\n Ok((result_type, executor_fut.boxed()))\n }\n}\n\npub(crate) struct PyExportTargetFactory {\n pub py_target_connector: Py,\n}\n\nstruct PyTargetExecutorContext {\n py_export_ctx: Py,\n py_exec_ctx: Arc,\n}\n\n#[derive(Debug)]\nstruct PyTargetResourceSetupStatus {\n stale_existing_states: IndexSet>,\n desired_state: Option,\n}\n\nimpl setup::ResourceSetupStatus for PyTargetResourceSetupStatus {\n fn describe_changes(&self) -> Vec {\n vec![]\n }\n\n fn change_type(&self) -> 
setup::SetupChangeType {\n if self.stale_existing_states.is_empty() {\n setup::SetupChangeType::NoChange\n } else if self.desired_state.is_some() {\n if self\n .stale_existing_states\n .iter()\n .any(|state| state.is_none())\n {\n setup::SetupChangeType::Create\n } else {\n setup::SetupChangeType::Update\n }\n } else {\n setup::SetupChangeType::Delete\n }\n }\n}\n\n#[async_trait]\nimpl interface::ExportTargetFactory for PyExportTargetFactory {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n if declarations.len() != 0 {\n api_error!(\"Custom target connector doesn't support declarations yet\");\n }\n\n let mut build_outputs = Vec::with_capacity(data_collections.len());\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n for data_collection in data_collections.into_iter() {\n let (py_export_ctx, persistent_key) =\n Python::with_gil(|py| -> Result<(Py, serde_json::Value)> {\n // Deserialize the spec to Python object.\n let py_export_ctx = self\n .py_target_connector\n .call_method(\n py,\n \"create_export_context\",\n (\n &data_collection.name,\n pythonize(py, &data_collection.spec)?,\n pythonize(py, &data_collection.key_fields_schema)?,\n pythonize(py, &data_collection.value_fields_schema)?,\n ),\n None,\n )\n .to_result_with_py_trace(py)?;\n\n // Call the `get_persistent_key` method to get the persistent key.\n let persistent_key = self\n .py_target_connector\n .call_method(py, \"get_persistent_key\", (&py_export_ctx,), None)\n .to_result_with_py_trace(py)?;\n let persistent_key = depythonize(&persistent_key.into_bound(py))?;\n Ok((py_export_ctx, persistent_key))\n })?;\n\n let py_exec_ctx = py_exec_ctx.clone();\n let build_output = interface::ExportDataCollectionBuildOutput {\n export_context: Box::pin(async move {\n Ok(Arc::new(PyTargetExecutorContext {\n 
py_export_ctx,\n py_exec_ctx,\n }) as Arc)\n }),\n setup_key: persistent_key,\n desired_setup_state: data_collection.spec,\n };\n build_outputs.push(build_output);\n }\n Ok((build_outputs, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n _context: Arc,\n ) -> Result> {\n // Collect all possible existing states that are not the desired state.\n let mut stale_existing_states = IndexSet::new();\n if !existing_states.always_exists() && desired_state.is_some() {\n stale_existing_states.insert(None);\n }\n for possible_state in existing_states.possible_versions() {\n if Some(possible_state) != desired_state.as_ref() {\n stale_existing_states.insert(Some(possible_state.clone()));\n }\n }\n\n Ok(Box::new(PyTargetResourceSetupStatus {\n stale_existing_states,\n desired_state,\n }))\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n Ok(key.clone())\n }\n\n fn check_state_compatibility(\n &self,\n _desired_state: &serde_json::Value,\n _existing_state: &serde_json::Value,\n ) -> Result {\n // The Python target connector doesn't support state update yet.\n Ok(SetupStateCompatibility::Compatible)\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n Python::with_gil(|py| -> Result {\n let result = self\n .py_target_connector\n .call_method(py, \"describe_resource\", (pythonize(py, key)?,), None)\n .to_result_with_py_trace(py)?;\n let description = result.extract::(py)?;\n Ok(description)\n })\n }\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Filter the setup changes that are not NoChange, and flatten to\n // `list[tuple[key, list[stale_existing_states | None], desired_state | None]]` 
for Python.\n let mut setup_changes = Vec::new();\n for item in setup_status.into_iter() {\n let decoded_setup_status = (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n if ::change_type(decoded_setup_status)\n != setup::SetupChangeType::NoChange\n {\n setup_changes.push((\n item.key,\n &decoded_setup_status.stale_existing_states,\n &decoded_setup_status.desired_state,\n ));\n }\n }\n\n if setup_changes.is_empty() {\n return Ok(());\n }\n\n // Call the `apply_setup_changes_async()` method.\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let py_result = Python::with_gil(move |py| -> Result<_> {\n let result_coro = self\n .py_target_connector\n .call_method(\n py,\n \"apply_setup_changes_async\",\n (pythonize(py, &setup_changes)?,),\n None,\n )\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n\n Ok(())\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec<\n interface::ExportTargetMutationWithContext<'async_trait, dyn Any + Send + Sync>,\n >,\n ) -> Result<()> {\n if mutations.is_empty() {\n return Ok(());\n }\n\n let py_result = Python::with_gil(|py| -> Result<_> {\n // Create a `list[tuple[export_ctx, list[tuple[key, value | None]]]]` for Python, and collect `py_exec_ctx`.\n let mut py_args = Vec::with_capacity(mutations.len());\n let mut py_exec_ctx: Option<&Arc> = None;\n for mutation in mutations.into_iter() {\n // Downcast export_context to PyTargetExecutorContext.\n let export_context = (mutation.export_context as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n\n let mut flattened_mutations = Vec::with_capacity(\n 
mutation.mutation.upserts.len() + mutation.mutation.deletes.len(),\n );\n for upsert in mutation.mutation.upserts.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &upsert.key.into())?,\n py::field_values_to_py_object(py, upsert.value.fields.iter())?,\n ));\n }\n for delete in mutation.mutation.deletes.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &delete.key.into())?,\n py.None().into_bound(py),\n ));\n }\n py_args.push((\n &export_context.py_export_ctx,\n PyList::new(py, flattened_mutations)?.into_any(),\n ));\n py_exec_ctx = py_exec_ctx.or(Some(&export_context.py_exec_ctx));\n }\n let py_exec_ctx = py_exec_ctx.ok_or_else(invariance_violation)?;\n\n let result_coro = self\n .py_target_connector\n .call_method(py, \"mutate_async\", (py_args,), None)\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/builder/analyzer.rs", "use crate::builder::exec_ctx::AnalyzedSetupState;\nuse crate::ops::get_executor_factory;\nuse crate::prelude::*;\n\nuse super::plan::*;\nuse crate::lib_context::get_auth_registry;\nuse crate::utils::fingerprint::Fingerprinter;\nuse crate::{\n base::{schema::*, spec::*},\n ops::interface::*,\n};\nuse futures::future::{BoxFuture, try_join3};\nuse futures::{FutureExt, future::try_join_all};\n\n#[derive(Debug)]\npub(super) enum ValueTypeBuilder {\n Basic(BasicValueType),\n Struct(StructSchemaBuilder),\n Table(TableSchemaBuilder),\n}\n\nimpl TryFrom<&ValueType> for ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_from(value_type: &ValueType) -> Result {\n match value_type {\n ValueType::Basic(basic_type) => Ok(ValueTypeBuilder::Basic(basic_type.clone())),\n ValueType::Struct(struct_type) 
=> Ok(ValueTypeBuilder::Struct(struct_type.try_into()?)),\n ValueType::Table(table_type) => Ok(ValueTypeBuilder::Table(table_type.try_into()?)),\n }\n }\n}\n\nimpl TryInto for &ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n match self {\n ValueTypeBuilder::Basic(basic_type) => Ok(ValueType::Basic(basic_type.clone())),\n ValueTypeBuilder::Struct(struct_type) => Ok(ValueType::Struct(struct_type.try_into()?)),\n ValueTypeBuilder::Table(table_type) => Ok(ValueType::Table(table_type.try_into()?)),\n }\n }\n}\n\n#[derive(Default, Debug)]\npub(super) struct StructSchemaBuilder {\n fields: Vec>,\n field_name_idx: HashMap,\n description: Option>,\n}\n\nimpl StructSchemaBuilder {\n fn add_field(&mut self, field: FieldSchema) -> Result {\n let field_idx = self.fields.len() as u32;\n match self.field_name_idx.entry(field.name.clone()) {\n std::collections::hash_map::Entry::Occupied(_) => {\n bail!(\"Field name already exists: {}\", field.name);\n }\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(field_idx);\n }\n }\n self.fields.push(field);\n Ok(field_idx)\n }\n\n pub fn find_field(&self, field_name: &'_ str) -> Option<(u32, &FieldSchema)> {\n self.field_name_idx\n .get(field_name)\n .map(|&field_idx| (field_idx, &self.fields[field_idx as usize]))\n }\n}\n\nimpl TryFrom<&StructSchema> for StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &StructSchema) -> Result {\n let mut result = StructSchemaBuilder {\n fields: Vec::with_capacity(schema.fields.len()),\n field_name_idx: HashMap::with_capacity(schema.fields.len()),\n description: schema.description.clone(),\n };\n for field in schema.fields.iter() {\n result.add_field(FieldSchema::::from_alternative(field)?)?;\n }\n Ok(result)\n }\n}\n\nimpl TryInto for &StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n Ok(StructSchema {\n fields: Arc::new(\n self.fields\n .iter()\n 
.map(FieldSchema::::from_alternative)\n .collect::>>()?,\n ),\n description: self.description.clone(),\n })\n }\n}\n\n#[derive(Debug)]\npub(super) struct TableSchemaBuilder {\n pub kind: TableKind,\n pub sub_scope: Arc>,\n}\n\nimpl TryFrom<&TableSchema> for TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &TableSchema) -> Result {\n Ok(Self {\n kind: schema.kind,\n sub_scope: Arc::new(Mutex::new(DataScopeBuilder {\n data: (&schema.row).try_into()?,\n })),\n })\n }\n}\n\nimpl TryInto for &TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n let sub_scope = self.sub_scope.lock().unwrap();\n let row = (&sub_scope.data).try_into()?;\n Ok(TableSchema {\n kind: self.kind,\n row,\n })\n }\n}\n\nfn try_make_common_value_type(\n value_type1: &EnrichedValueType,\n value_type2: &EnrichedValueType,\n) -> Result {\n let typ = match (&value_type1.typ, &value_type2.typ) {\n (ValueType::Basic(basic_type1), ValueType::Basic(basic_type2)) => {\n if basic_type1 != basic_type2 {\n api_bail!(\"Value types are not compatible: {basic_type1} vs {basic_type2}\");\n }\n ValueType::Basic(basic_type1.clone())\n }\n (ValueType::Struct(struct_type1), ValueType::Struct(struct_type2)) => {\n let common_schema = try_merge_struct_schemas(struct_type1, struct_type2)?;\n ValueType::Struct(common_schema)\n }\n (ValueType::Table(table_type1), ValueType::Table(table_type2)) => {\n if table_type1.kind != table_type2.kind {\n api_bail!(\n \"Collection types are not compatible: {} vs {}\",\n table_type1,\n table_type2\n );\n }\n let row = try_merge_struct_schemas(&table_type1.row, &table_type2.row)?;\n ValueType::Table(TableSchema {\n kind: table_type1.kind,\n row,\n })\n }\n (t1 @ (ValueType::Basic(_) | ValueType::Struct(_) | ValueType::Table(_)), t2) => {\n api_bail!(\"Unmatched types:\\n {t1}\\n {t2}\\n\",)\n }\n };\n let common_attrs: Vec<_> = value_type1\n .attrs\n .iter()\n .filter_map(|(k, v)| {\n if value_type2.attrs.get(k) == Some(v) {\n 
Some((k, v))\n } else {\n None\n }\n })\n .collect();\n let attrs = if common_attrs.len() == value_type1.attrs.len() {\n value_type1.attrs.clone()\n } else {\n Arc::new(\n common_attrs\n .into_iter()\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect(),\n )\n };\n\n Ok(EnrichedValueType {\n typ,\n nullable: value_type1.nullable || value_type2.nullable,\n attrs,\n })\n}\n\nfn try_merge_fields_schemas(\n schema1: &[FieldSchema],\n schema2: &[FieldSchema],\n) -> Result> {\n if schema1.len() != schema2.len() {\n api_bail!(\n \"Fields are not compatible as they have different fields count:\\n ({})\\n ({})\\n\",\n schema1\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \"),\n schema2\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \")\n );\n }\n let mut result_fields = Vec::with_capacity(schema1.len());\n for (field1, field2) in schema1.iter().zip(schema2.iter()) {\n if field1.name != field2.name {\n api_bail!(\n \"Structs are not compatible as they have incompatible field names `{}` vs `{}`\",\n field1.name,\n field2.name\n );\n }\n result_fields.push(FieldSchema {\n name: field1.name.clone(),\n value_type: try_make_common_value_type(&field1.value_type, &field2.value_type)?,\n });\n }\n Ok(result_fields)\n}\n\nfn try_merge_struct_schemas(\n schema1: &StructSchema,\n schema2: &StructSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(StructSchema {\n fields: Arc::new(fields),\n description: schema1\n .description\n .clone()\n .or_else(|| schema2.description.clone()),\n })\n}\n\nfn try_merge_collector_schemas(\n schema1: &CollectorSchema,\n schema2: &CollectorSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(CollectorSchema {\n fields,\n auto_uuid_field_idx: if schema1.auto_uuid_field_idx == schema2.auto_uuid_field_idx {\n schema1.auto_uuid_field_idx\n } else {\n None\n },\n })\n}\n\n#[derive(Debug)]\npub(super) struct CollectorBuilder 
{\n pub schema: Arc,\n pub is_used: bool,\n}\n\nimpl CollectorBuilder {\n pub fn new(schema: Arc) -> Self {\n Self {\n schema,\n is_used: false,\n }\n }\n\n pub fn merge_schema(&mut self, schema: &CollectorSchema) -> Result<()> {\n if self.is_used {\n api_bail!(\"Collector is already used\");\n }\n let existing_schema = Arc::make_mut(&mut self.schema);\n *existing_schema = try_merge_collector_schemas(existing_schema, schema)?;\n Ok(())\n }\n\n pub fn use_schema(&mut self) -> Arc {\n self.is_used = true;\n self.schema.clone()\n }\n}\n\n#[derive(Debug)]\npub(super) struct DataScopeBuilder {\n pub data: StructSchemaBuilder,\n}\n\nimpl DataScopeBuilder {\n pub fn new() -> Self {\n Self {\n data: Default::default(),\n }\n }\n\n pub fn last_field(&self) -> Option<&FieldSchema> {\n self.data.fields.last()\n }\n\n pub fn add_field(\n &mut self,\n name: FieldName,\n value_type: &EnrichedValueType,\n ) -> Result {\n let field_index = self.data.add_field(FieldSchema {\n name,\n value_type: EnrichedValueType::from_alternative(value_type)?,\n })?;\n Ok(AnalyzedOpOutput {\n field_idx: field_index,\n })\n }\n\n pub fn analyze_field_path<'a>(\n &'a self,\n field_path: &'_ FieldPath,\n ) -> Result<(\n AnalyzedLocalFieldReference,\n &'a EnrichedValueType,\n )> {\n let mut indices = Vec::with_capacity(field_path.len());\n let mut struct_schema = &self.data;\n\n let mut i = 0;\n let value_type = loop {\n let field_name = &field_path[i];\n let (field_idx, field) = struct_schema.find_field(field_name).ok_or_else(|| {\n api_error!(\"Field {} not found\", field_path[0..(i + 1)].join(\".\"))\n })?;\n indices.push(field_idx);\n if i + 1 >= field_path.len() {\n break &field.value_type;\n }\n i += 1;\n\n struct_schema = match &field.value_type.typ {\n ValueTypeBuilder::Struct(struct_type) => struct_type,\n _ => {\n api_bail!(\"Field {} is not a struct\", field_path[0..(i + 1)].join(\".\"));\n }\n };\n };\n Ok((\n AnalyzedLocalFieldReference {\n fields_idx: indices,\n },\n value_type,\n ))\n 
}\n}\n\npub(super) struct AnalyzerContext {\n pub lib_ctx: Arc,\n pub flow_ctx: Arc,\n}\n\n#[derive(Debug, Default)]\npub(super) struct OpScopeStates {\n pub op_output_types: HashMap,\n pub collectors: IndexMap,\n pub sub_scopes: HashMap>,\n}\n\nimpl OpScopeStates {\n pub fn add_collector(\n &mut self,\n collector_name: FieldName,\n schema: CollectorSchema,\n ) -> Result {\n let existing_len = self.collectors.len();\n let idx = match self.collectors.entry(collector_name) {\n indexmap::map::Entry::Occupied(mut entry) => {\n entry.get_mut().merge_schema(&schema)?;\n entry.index()\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(CollectorBuilder::new(Arc::new(schema)));\n existing_len\n }\n };\n Ok(AnalyzedLocalCollectorReference {\n collector_idx: idx as u32,\n })\n }\n\n pub fn consume_collector(\n &mut self,\n collector_name: &FieldName,\n ) -> Result<(AnalyzedLocalCollectorReference, Arc)> {\n let (collector_idx, _, collector) = self\n .collectors\n .get_full_mut(collector_name)\n .ok_or_else(|| api_error!(\"Collector not found: {}\", collector_name))?;\n Ok((\n AnalyzedLocalCollectorReference {\n collector_idx: collector_idx as u32,\n },\n collector.use_schema(),\n ))\n }\n\n fn build_op_scope_schema(&self) -> OpScopeSchema {\n OpScopeSchema {\n op_output_types: self\n .op_output_types\n .iter()\n .map(|(name, value_type)| (name.clone(), value_type.without_attrs()))\n .collect(),\n collectors: self\n .collectors\n .iter()\n .map(|(name, schema)| NamedSpec {\n name: name.clone(),\n spec: schema.schema.clone(),\n })\n .collect(),\n op_scopes: self.sub_scopes.clone(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct OpScope {\n pub name: String,\n pub parent: Option<(Arc, spec::FieldPath)>,\n pub(super) data: Arc>,\n pub(super) states: Mutex,\n}\n\nstruct Iter<'a>(Option<&'a OpScope>);\n\nimpl<'a> Iterator for Iter<'a> {\n type Item = &'a OpScope;\n\n fn next(&mut self) -> Option {\n match self.0 {\n Some(scope) => {\n self.0 = 
scope.parent.as_ref().map(|(parent, _)| parent.as_ref());\n Some(scope)\n }\n None => None,\n }\n }\n}\n\nimpl OpScope {\n pub(super) fn new(\n name: String,\n parent: Option<(Arc, spec::FieldPath)>,\n data: Arc>,\n ) -> Arc {\n Arc::new(Self {\n name,\n parent,\n data,\n states: Mutex::default(),\n })\n }\n\n fn add_op_output(\n &self,\n name: FieldName,\n value_type: EnrichedValueType,\n ) -> Result {\n let op_output = self\n .data\n .lock()\n .unwrap()\n .add_field(name.clone(), &value_type)?;\n self.states\n .lock()\n .unwrap()\n .op_output_types\n .insert(name, value_type);\n Ok(op_output)\n }\n\n pub fn ancestors(&self) -> impl Iterator {\n Iter(Some(self))\n }\n\n pub fn is_op_scope_descendant(&self, other: &Self) -> bool {\n if self == other {\n return true;\n }\n match &self.parent {\n Some((parent, _)) => parent.is_op_scope_descendant(other),\n None => false,\n }\n }\n\n pub(super) fn new_foreach_op_scope(\n self: &Arc,\n scope_name: String,\n field_path: &FieldPath,\n ) -> Result<(AnalyzedLocalFieldReference, Arc)> {\n let (local_field_ref, sub_data_scope) = {\n let data_scope = self.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(field_path)?;\n let sub_data_scope = match &value_type.typ {\n ValueTypeBuilder::Table(table_type) => table_type.sub_scope.clone(),\n _ => api_bail!(\"ForEach only works on collection, field {field_path} is not\"),\n };\n (local_field_ref, sub_data_scope)\n };\n let sub_op_scope = OpScope::new(\n scope_name,\n Some((self.clone(), field_path.clone())),\n sub_data_scope,\n );\n Ok((local_field_ref, sub_op_scope))\n }\n}\n\nimpl std::fmt::Display for OpScope {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n if let Some((scope, field_path)) = &self.parent {\n write!(f, \"{} [{} AS {}]\", scope, field_path, self.name)?;\n } else {\n write!(f, \"[{}]\", self.name)?;\n }\n Ok(())\n }\n}\n\nimpl PartialEq for OpScope {\n fn eq(&self, other: &Self) -> bool {\n 
std::ptr::eq(self, other)\n }\n}\nimpl Eq for OpScope {}\n\nfn find_scope<'a>(scope_name: &ScopeName, op_scope: &'a OpScope) -> Result<(u32, &'a OpScope)> {\n let (up_level, scope) = op_scope\n .ancestors()\n .enumerate()\n .find(|(_, s)| &s.name == scope_name)\n .ok_or_else(|| api_error!(\"Scope not found: {}\", scope_name))?;\n Ok((up_level as u32, scope))\n}\n\nfn analyze_struct_mapping(\n mapping: &StructMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedStructMapping, Vec)> {\n let mut field_mappings = Vec::with_capacity(mapping.fields.len());\n let mut field_schemas = Vec::with_capacity(mapping.fields.len());\n for field in mapping.fields.iter() {\n let (field_mapping, value_type) = analyze_value_mapping(&field.spec, op_scope)?;\n field_mappings.push(field_mapping);\n field_schemas.push(FieldSchema {\n name: field.name.clone(),\n value_type,\n });\n }\n Ok((\n AnalyzedStructMapping {\n fields: field_mappings,\n },\n field_schemas,\n ))\n}\n\nfn analyze_value_mapping(\n value_mapping: &ValueMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedValueMapping, EnrichedValueType)> {\n let result = match value_mapping {\n ValueMapping::Constant(v) => {\n let value = value::Value::from_json(v.value.clone(), &v.schema.typ)?;\n (AnalyzedValueMapping::Constant { value }, v.schema.clone())\n }\n\n ValueMapping::Field(v) => {\n let (scope_up_level, op_scope) = match &v.scope {\n Some(scope_name) => find_scope(scope_name, op_scope)?,\n None => (0, op_scope),\n };\n let data_scope = op_scope.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(&v.field_path)?;\n (\n AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: local_field_ref,\n scope_up_level,\n }),\n EnrichedValueType::from_alternative(value_type)?,\n )\n }\n\n ValueMapping::Struct(v) => {\n let (struct_mapping, field_schemas) = analyze_struct_mapping(v, op_scope)?;\n (\n AnalyzedValueMapping::Struct(struct_mapping),\n EnrichedValueType {\n typ: 
ValueType::Struct(StructSchema {\n fields: Arc::new(field_schemas),\n description: None,\n }),\n nullable: false,\n attrs: Default::default(),\n },\n )\n }\n };\n Ok(result)\n}\n\nfn analyze_input_fields(\n arg_bindings: &[OpArgBinding],\n op_scope: &OpScope,\n) -> Result> {\n let mut input_field_schemas = Vec::with_capacity(arg_bindings.len());\n for arg_binding in arg_bindings.iter() {\n let (analyzed_value, value_type) = analyze_value_mapping(&arg_binding.value, op_scope)?;\n input_field_schemas.push(OpArgSchema {\n name: arg_binding.arg_name.clone(),\n value_type,\n analyzed_value: analyzed_value.clone(),\n });\n }\n Ok(input_field_schemas)\n}\n\nfn add_collector(\n scope_name: &ScopeName,\n collector_name: FieldName,\n schema: CollectorSchema,\n op_scope: &OpScope,\n) -> Result {\n let (scope_up_level, scope) = find_scope(scope_name, op_scope)?;\n let local_ref = scope\n .states\n .lock()\n .unwrap()\n .add_collector(collector_name, schema)?;\n Ok(AnalyzedCollectorReference {\n local: local_ref,\n scope_up_level,\n })\n}\n\nstruct ExportDataFieldsInfo {\n local_collector_ref: AnalyzedLocalCollectorReference,\n primary_key_def: AnalyzedPrimaryKeyDef,\n primary_key_type: ValueType,\n value_fields_idx: Vec,\n value_stable: bool,\n}\n\nimpl AnalyzerContext {\n pub(super) async fn analyze_import_op(\n &self,\n op_scope: &Arc,\n import_op: NamedSpec,\n ) -> Result> + Send + use<>> {\n let source_factory = match get_executor_factory(&import_op.spec.source.kind)? 
{\n ExecutorFactory::Source(source_executor) => source_executor,\n _ => {\n return Err(anyhow::anyhow!(\n \"`{}` is not a source op\",\n import_op.spec.source.kind\n ));\n }\n };\n let (output_type, executor) = source_factory\n .build(\n serde_json::Value::Object(import_op.spec.source.spec),\n self.flow_ctx.clone(),\n )\n .await?;\n\n let op_name = import_op.name.clone();\n let primary_key_type = output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key: {op_name}\"))?\n .typ\n .clone();\n let output = op_scope.add_op_output(import_op.name, output_type)?;\n\n let concur_control_options = import_op\n .spec\n .execution_options\n .get_concur_control_options();\n let global_concurrency_controller = self.lib_ctx.global_concurrency_controller.clone();\n let result_fut = async move {\n trace!(\"Start building executor for source op `{op_name}`\");\n let executor = executor.await?;\n trace!(\"Finished building executor for source op `{op_name}`\");\n Ok(AnalyzedImportOp {\n executor,\n output,\n primary_key_type,\n name: op_name,\n refresh_options: import_op.spec.refresh_options,\n concurrency_controller: concur_control::CombinedConcurrencyController::new(\n &concur_control_options,\n global_concurrency_controller,\n ),\n })\n };\n Ok(result_fut)\n }\n\n pub(super) async fn analyze_reactive_op(\n &self,\n op_scope: &Arc,\n reactive_op: &NamedSpec,\n ) -> Result>> {\n let result_fut = match &reactive_op.spec {\n ReactiveOpSpec::Transform(op) => {\n let input_field_schemas =\n analyze_input_fields(&op.inputs, op_scope).with_context(|| {\n format!(\n \"Failed to analyze inputs for transform op: {}\",\n reactive_op.name\n )\n })?;\n let spec = serde_json::Value::Object(op.op.spec.clone());\n\n match get_executor_factory(&op.op.kind)? 
{\n ExecutorFactory::SimpleFunction(fn_executor) => {\n let input_value_mappings = input_field_schemas\n .iter()\n .map(|field| field.analyzed_value.clone())\n .collect();\n let (output_enriched_type, executor) = fn_executor\n .build(spec, input_field_schemas, self.flow_ctx.clone())\n .await?;\n let logic_fingerprinter = Fingerprinter::default()\n .with(&op.op)?\n .with(&output_enriched_type.without_attrs())?;\n let output_type = output_enriched_type.typ.clone();\n let output = op_scope\n .add_op_output(reactive_op.name.clone(), output_enriched_type)?;\n let op_name = reactive_op.name.clone();\n async move {\n trace!(\"Start building executor for transform op `{op_name}`\");\n let executor = executor.await.with_context(|| {\n format!(\"Failed to build executor for transform op: {op_name}\")\n })?;\n let enable_cache = executor.enable_cache();\n let behavior_version = executor.behavior_version();\n trace!(\"Finished building executor for transform op `{op_name}`, enable cache: {enable_cache}, behavior version: {behavior_version:?}\");\n let function_exec_info = AnalyzedFunctionExecInfo {\n enable_cache,\n behavior_version,\n fingerprinter: logic_fingerprinter\n .with(&behavior_version)?,\n output_type\n };\n if function_exec_info.enable_cache\n && function_exec_info.behavior_version.is_none()\n {\n api_bail!(\n \"When caching is enabled, behavior version must be specified for transform op: {op_name}\"\n );\n }\n Ok(AnalyzedReactiveOp::Transform(AnalyzedTransformOp {\n name: op_name,\n inputs: input_value_mappings,\n function_exec_info,\n executor,\n output,\n }))\n }\n .boxed()\n }\n _ => api_bail!(\"`{}` is not a function op\", op.op.kind),\n }\n }\n\n ReactiveOpSpec::ForEach(foreach_op) => {\n let (local_field_ref, sub_op_scope) = op_scope.new_foreach_op_scope(\n foreach_op.op_scope.name.clone(),\n &foreach_op.field_path,\n )?;\n let analyzed_op_scope_fut = {\n let analyzed_op_scope_fut = self\n .analyze_op_scope(&sub_op_scope, &foreach_op.op_scope.ops)\n 
.boxed_local()\n .await?;\n let sub_op_scope_schema =\n sub_op_scope.states.lock().unwrap().build_op_scope_schema();\n op_scope.states.lock().unwrap().sub_scopes.insert(\n foreach_op.op_scope.name.clone(),\n Arc::new(sub_op_scope_schema),\n );\n analyzed_op_scope_fut\n };\n let op_name = reactive_op.name.clone();\n\n let concur_control_options =\n foreach_op.execution_options.get_concur_control_options();\n async move {\n Ok(AnalyzedReactiveOp::ForEach(AnalyzedForEachOp {\n local_field_ref,\n op_scope: analyzed_op_scope_fut\n .await\n .with_context(|| format!(\"Analyzing foreach op: {op_name}\"))?,\n name: op_name,\n concurrency_controller: concur_control::ConcurrencyController::new(\n &concur_control_options,\n ),\n }))\n }\n .boxed()\n }\n\n ReactiveOpSpec::Collect(op) => {\n let (struct_mapping, fields_schema) = analyze_struct_mapping(&op.input, op_scope)?;\n let has_auto_uuid_field = op.auto_uuid_field.is_some();\n let fingerprinter = Fingerprinter::default().with(&fields_schema)?;\n let collect_op = AnalyzedReactiveOp::Collect(AnalyzedCollectOp {\n name: reactive_op.name.clone(),\n has_auto_uuid_field,\n input: struct_mapping,\n collector_ref: add_collector(\n &op.scope_name,\n op.collector_name.clone(),\n CollectorSchema::from_fields(fields_schema, op.auto_uuid_field.clone()),\n op_scope,\n )?,\n fingerprinter,\n });\n async move { Ok(collect_op) }.boxed()\n }\n };\n Ok(result_fut)\n }\n\n #[allow(clippy::too_many_arguments)]\n async fn analyze_export_op_group(\n &self,\n target_kind: &str,\n op_scope: &Arc,\n flow_inst: &FlowInstanceSpec,\n export_op_group: &AnalyzedExportTargetOpGroup,\n declarations: Vec,\n targets_analyzed_ss: &mut [Option],\n declarations_analyzed_ss: &mut Vec,\n ) -> Result> + Send + use<>>> {\n let mut collection_specs = Vec::::new();\n let mut data_fields_infos = Vec::::new();\n for idx in export_op_group.op_idx.iter() {\n let export_op = &flow_inst.export_ops[*idx];\n let (local_collector_ref, collector_schema) = op_scope\n .states\n 
.lock()\n .unwrap()\n .consume_collector(&export_op.spec.collector_name)?;\n let (key_fields_schema, value_fields_schema, data_collection_info) =\n match &export_op.spec.index_options.primary_key_fields {\n Some(fields) => {\n let pk_fields_idx = fields\n .iter()\n .map(|f| {\n collector_schema\n .fields\n .iter()\n .position(|field| &field.name == f)\n .ok_or_else(|| anyhow!(\"field not found: {}\", f))\n })\n .collect::>>()?;\n\n let key_fields_schema = pk_fields_idx\n .iter()\n .map(|idx| collector_schema.fields[*idx].clone())\n .collect::>();\n let primary_key_type = if pk_fields_idx.len() == 1 {\n key_fields_schema[0].value_type.typ.clone()\n } else {\n ValueType::Struct(StructSchema {\n fields: Arc::from(key_fields_schema.clone()),\n description: None,\n })\n };\n let mut value_fields_schema: Vec = vec![];\n let mut value_fields_idx = vec![];\n for (idx, field) in collector_schema.fields.iter().enumerate() {\n if !pk_fields_idx.contains(&idx) {\n value_fields_schema.push(field.clone());\n value_fields_idx.push(idx as u32);\n }\n }\n let value_stable = collector_schema\n .auto_uuid_field_idx\n .as_ref()\n .map(|uuid_idx| pk_fields_idx.contains(uuid_idx))\n .unwrap_or(false);\n (\n key_fields_schema,\n value_fields_schema,\n ExportDataFieldsInfo {\n local_collector_ref,\n primary_key_def: AnalyzedPrimaryKeyDef::Fields(pk_fields_idx),\n primary_key_type,\n value_fields_idx,\n value_stable,\n },\n )\n }\n None => {\n // TODO: Support auto-generate primary key\n api_bail!(\"Primary key fields must be specified\")\n }\n };\n collection_specs.push(interface::ExportDataCollectionSpec {\n name: export_op.name.clone(),\n spec: serde_json::Value::Object(export_op.spec.target.spec.clone()),\n key_fields_schema,\n value_fields_schema,\n index_options: export_op.spec.index_options.clone(),\n });\n data_fields_infos.push(data_collection_info);\n }\n let (data_collections_output, declarations_output) = export_op_group\n .target_factory\n .clone()\n .build(collection_specs, 
declarations, self.flow_ctx.clone())\n .await?;\n let analyzed_export_ops = export_op_group\n .op_idx\n .iter()\n .zip(data_collections_output.into_iter())\n .zip(data_fields_infos.into_iter())\n .map(|((idx, data_coll_output), data_fields_info)| {\n let export_op = &flow_inst.export_ops[*idx];\n let op_name = export_op.name.clone();\n let export_target_factory = export_op_group.target_factory.clone();\n\n let export_op_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key: data_coll_output.setup_key,\n desired_setup_state: data_coll_output.desired_setup_state,\n setup_by_user: export_op.spec.setup_by_user,\n };\n targets_analyzed_ss[*idx] = Some(export_op_ss);\n\n Ok(async move {\n trace!(\"Start building executor for export op `{op_name}`\");\n let export_context = data_coll_output\n .export_context\n .await\n .with_context(|| format!(\"Analyzing export op: {op_name}\"))?;\n trace!(\"Finished building executor for export op `{op_name}`\");\n Ok(AnalyzedExportOp {\n name: op_name,\n input: data_fields_info.local_collector_ref,\n export_target_factory,\n export_context,\n primary_key_def: data_fields_info.primary_key_def,\n primary_key_type: data_fields_info.primary_key_type,\n value_fields: data_fields_info.value_fields_idx,\n value_stable: data_fields_info.value_stable,\n })\n })\n })\n .collect::>>()?;\n for (setup_key, desired_setup_state) in declarations_output {\n let decl_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key,\n desired_setup_state,\n setup_by_user: false,\n };\n declarations_analyzed_ss.push(decl_ss);\n }\n Ok(analyzed_export_ops)\n }\n\n async fn analyze_op_scope(\n &self,\n op_scope: &Arc,\n reactive_ops: &[NamedSpec],\n ) -> Result> + Send + use<>> {\n let mut op_futs = Vec::with_capacity(reactive_ops.len());\n for reactive_op in reactive_ops.iter() {\n op_futs.push(self.analyze_reactive_op(op_scope, reactive_op).await?);\n }\n let collector_len = 
op_scope.states.lock().unwrap().collectors.len();\n let result_fut = async move {\n Ok(AnalyzedOpScope {\n reactive_ops: try_join_all(op_futs).await?,\n collector_len,\n })\n };\n Ok(result_fut)\n }\n}\n\npub fn build_flow_instance_context(\n flow_inst_name: &str,\n py_exec_ctx: Option,\n) -> Arc {\n Arc::new(FlowInstanceContext {\n flow_instance_name: flow_inst_name.to_string(),\n auth_registry: get_auth_registry().clone(),\n py_exec_ctx: py_exec_ctx.map(Arc::new),\n })\n}\n\nfn build_flow_schema(root_op_scope: &OpScope) -> Result {\n let schema = (&root_op_scope.data.lock().unwrap().data).try_into()?;\n let root_op_scope_schema = root_op_scope.states.lock().unwrap().build_op_scope_schema();\n Ok(FlowSchema {\n schema,\n root_op_scope: root_op_scope_schema,\n })\n}\n\npub async fn analyze_flow(\n flow_inst: &FlowInstanceSpec,\n flow_ctx: Arc,\n) -> Result<(\n FlowSchema,\n AnalyzedSetupState,\n impl Future> + Send + use<>,\n)> {\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let root_data_scope = Arc::new(Mutex::new(DataScopeBuilder::new()));\n let root_op_scope = OpScope::new(ROOT_SCOPE_NAME.to_string(), None, root_data_scope);\n let mut import_ops_futs = Vec::with_capacity(flow_inst.import_ops.len());\n for import_op in flow_inst.import_ops.iter() {\n import_ops_futs.push(\n analyzer_ctx\n .analyze_import_op(&root_op_scope, import_op.clone())\n .await?,\n );\n }\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n\n #[derive(Default)]\n struct TargetOpGroup {\n export_op_ids: Vec,\n declarations: Vec,\n }\n let mut target_op_group = IndexMap::::new();\n for (idx, export_op) in flow_inst.export_ops.iter().enumerate() {\n target_op_group\n .entry(export_op.spec.target.kind.clone())\n .or_default()\n .export_op_ids\n .push(idx);\n }\n for declaration in flow_inst.declarations.iter() {\n target_op_group\n .entry(declaration.kind.clone())\n .or_default()\n .declarations\n 
.push(serde_json::Value::Object(declaration.spec.clone()));\n }\n\n let mut export_ops_futs = vec![];\n let mut analyzed_target_op_groups = vec![];\n\n let mut targets_analyzed_ss = Vec::with_capacity(flow_inst.export_ops.len());\n targets_analyzed_ss.resize_with(flow_inst.export_ops.len(), || None);\n\n let mut declarations_analyzed_ss = Vec::with_capacity(flow_inst.declarations.len());\n\n for (target_kind, op_ids) in target_op_group.into_iter() {\n let target_factory = match get_executor_factory(&target_kind)? {\n ExecutorFactory::ExportTarget(export_executor) => export_executor,\n _ => api_bail!(\"`{}` is not a export target op\", target_kind),\n };\n let analyzed_target_op_group = AnalyzedExportTargetOpGroup {\n target_factory,\n op_idx: op_ids.export_op_ids,\n };\n export_ops_futs.extend(\n analyzer_ctx\n .analyze_export_op_group(\n target_kind.as_str(),\n &root_op_scope,\n flow_inst,\n &analyzed_target_op_group,\n op_ids.declarations,\n &mut targets_analyzed_ss,\n &mut declarations_analyzed_ss,\n )\n .await?,\n );\n analyzed_target_op_groups.push(analyzed_target_op_group);\n }\n\n let flow_schema = build_flow_schema(&root_op_scope)?;\n let analyzed_ss = exec_ctx::AnalyzedSetupState {\n targets: targets_analyzed_ss\n .into_iter()\n .enumerate()\n .map(|(idx, v)| v.ok_or_else(|| anyhow!(\"target op `{}` not found\", idx)))\n .collect::>>()?,\n declarations: declarations_analyzed_ss,\n };\n\n let logic_fingerprint = Fingerprinter::default()\n .with(&flow_inst)?\n .with(&flow_schema.schema)?\n .into_fingerprint();\n let plan_fut = async move {\n let (import_ops, op_scope, export_ops) = try_join3(\n try_join_all(import_ops_futs),\n op_scope_fut,\n try_join_all(export_ops_futs),\n )\n .await?;\n\n Ok(ExecutionPlan {\n logic_fingerprint,\n import_ops,\n op_scope,\n export_ops,\n export_op_groups: analyzed_target_op_groups,\n })\n };\n\n Ok((flow_schema, analyzed_ss, plan_fut))\n}\n\npub async fn analyze_transient_flow<'a>(\n flow_inst: &TransientFlowSpec,\n 
flow_ctx: Arc,\n) -> Result<(\n EnrichedValueType,\n FlowSchema,\n impl Future> + Send + 'a,\n)> {\n let mut root_data_scope = DataScopeBuilder::new();\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let mut input_fields = vec![];\n for field in flow_inst.input_fields.iter() {\n let analyzed_field = root_data_scope.add_field(field.name.clone(), &field.value_type)?;\n input_fields.push(analyzed_field);\n }\n let root_op_scope = OpScope::new(\n ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(root_data_scope)),\n );\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n let (output_value, output_type) =\n analyze_value_mapping(&flow_inst.output_value, &root_op_scope)?;\n let data_schema = build_flow_schema(&root_op_scope)?;\n let plan_fut = async move {\n let op_scope = op_scope_fut.await?;\n Ok(TransientExecutionPlan {\n input_fields,\n op_scope,\n output_value,\n })\n };\n Ok((output_type, data_schema, plan_fut))\n}\n"], ["/cocoindex/src/ops/factory_bases.rs", "use crate::prelude::*;\nuse crate::setup::ResourceSetupStatus;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse super::interface::*;\nuse super::registry::*;\nuse crate::api_bail;\nuse crate::api_error;\nuse crate::base::schema::*;\nuse crate::base::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\nuse crate::setup;\n// SourceFactoryBase\npub struct ResolvedOpArg {\n pub name: String,\n pub typ: EnrichedValueType,\n pub idx: usize,\n}\n\npub trait ResolvedOpArgExt: Sized {\n fn expect_type(self, expected_type: &ValueType) -> Result;\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value>;\n fn take_value(&self, args: &mut [value::Value]) -> Result;\n}\n\nimpl ResolvedOpArgExt for ResolvedOpArg {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n if &self.typ.typ != expected_type {\n api_bail!(\n \"Expected argument `{}` to be of type `{}`, got `{}`\",\n 
self.name,\n expected_type,\n self.typ.typ\n );\n }\n Ok(self)\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(&args[self.idx])\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(std::mem::take(&mut args[self.idx]))\n }\n}\n\nimpl ResolvedOpArgExt for Option {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n self.map(|arg| arg.expect_type(expected_type)).transpose()\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n Ok(self\n .as_ref()\n .map(|arg| arg.value(args))\n .transpose()?\n .unwrap_or(&value::Value::Null))\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n Ok(self\n .as_ref()\n .map(|arg| arg.take_value(args))\n .transpose()?\n .unwrap_or(value::Value::Null))\n }\n}\n\npub struct OpArgsResolver<'a> {\n args: &'a [OpArgSchema],\n num_positional_args: usize,\n next_positional_idx: usize,\n remaining_kwargs: HashMap<&'a str, usize>,\n}\n\nimpl<'a> OpArgsResolver<'a> {\n pub fn new(args: &'a [OpArgSchema]) -> Result {\n let mut num_positional_args = 0;\n let mut kwargs = HashMap::new();\n for (idx, arg) in args.iter().enumerate() {\n if let Some(name) = &arg.name.0 {\n kwargs.insert(name.as_str(), idx);\n } else {\n if !kwargs.is_empty() {\n api_bail!(\"Positional arguments must be provided before keyword arguments\");\n }\n num_positional_args += 1;\n }\n }\n Ok(Self {\n args,\n num_positional_args,\n next_positional_idx: 0,\n remaining_kwargs: kwargs,\n })\n }\n\n pub fn next_optional_arg(&mut self, name: &str) -> Result> {\n let idx = if let Some(idx) = self.remaining_kwargs.remove(name) {\n if 
self.next_positional_idx < self.num_positional_args {\n api_bail!(\"`{name}` is provided as both positional and keyword arguments\");\n } else {\n Some(idx)\n }\n } else if self.next_positional_idx < self.num_positional_args {\n let idx = self.next_positional_idx;\n self.next_positional_idx += 1;\n Some(idx)\n } else {\n None\n };\n Ok(idx.map(|idx| ResolvedOpArg {\n name: name.to_string(),\n typ: self.args[idx].value_type.clone(),\n idx,\n }))\n }\n\n pub fn next_arg(&mut self, name: &str) -> Result {\n Ok(self\n .next_optional_arg(name)?\n .ok_or_else(|| api_error!(\"Required argument `{name}` is missing\",))?)\n }\n\n pub fn done(self) -> Result<()> {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\n \"Expected {} positional arguments, got {}\",\n self.next_positional_idx,\n self.num_positional_args\n );\n }\n if !self.remaining_kwargs.is_empty() {\n api_bail!(\n \"Unexpected keyword arguments: {}\",\n self.remaining_kwargs\n .keys()\n .map(|k| format!(\"`{k}`\"))\n .collect::>()\n .join(\", \")\n )\n }\n Ok(())\n }\n\n pub fn get_analyze_value(&self, resolved_arg: &ResolvedOpArg) -> &AnalyzedValueMapping {\n &self.args[resolved_arg.idx].analyzed_value\n }\n}\n\n#[async_trait]\npub trait SourceFactoryBase: SourceFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn get_output_schema(\n &self,\n spec: &Self::Spec,\n context: &FlowInstanceContext,\n ) -> Result;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::Source(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SourceFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = 
serde_json::from_value(spec)?;\n let output_schema = self.get_output_schema(&spec, &context).await?;\n let executor = self.build_executor(spec, context);\n Ok((output_schema, executor))\n }\n}\n\n// SimpleFunctionFactoryBase\n\n#[async_trait]\npub trait SimpleFunctionFactoryBase: SimpleFunctionFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type ResolvedArgs: Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Self::Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)>;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n resolved_input_schema: Self::ResolvedArgs,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::SimpleFunction(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let mut args_resolver = OpArgsResolver::new(&input_schema)?;\n let (resolved_input_schema, output_schema) = self\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n args_resolver.done()?;\n let executor = self.build_executor(spec, resolved_input_schema, context);\n Ok((output_schema, executor))\n }\n}\n\npub struct TypedExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: F::Key,\n pub desired_setup_state: F::SetupState,\n}\npub struct TypedExportDataCollectionSpec {\n pub name: String,\n pub spec: F::Spec,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\npub struct TypedResourceSetupChangeItem<'a, F: 
StorageFactoryBase + ?Sized> {\n pub key: F::Key,\n pub setup_status: &'a F::SetupStatus,\n}\n\n#[async_trait]\npub trait StorageFactoryBase: ExportTargetFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type DeclarationSpec: DeserializeOwned + Send + Sync;\n type Key: Debug + Clone + Serialize + DeserializeOwned + Eq + Hash + Send + Sync;\n type SetupState: Debug + Clone + Serialize + DeserializeOwned + Send + Sync;\n type SetupStatus: ResourceSetupStatus;\n type ExportContext: Send + Sync + 'static;\n\n fn name(&self) -> &str;\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Self::Key, Self::SetupState)>,\n )>;\n\n /// Deserialize the setup key from a JSON value.\n /// You can override this method to provide a custom deserialization logic, e.g. to perform backward compatible deserialization.\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(serde_json::from_value(key)?)\n }\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: Self::Key,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &Self::SetupState,\n existing_state: &Self::SetupState,\n ) -> Result;\n\n fn describe_resource(&self, key: &Self::Key) -> Result;\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &Self::ExportContext,\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::ExportTarget(Arc::new(self)),\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n 
&self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[async_trait]\nimpl ExportTargetFactory for T {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n let (data_coll_output, decl_output) = StorageFactoryBase::build(\n self,\n data_collections\n .into_iter()\n .map(|d| {\n anyhow::Ok(TypedExportDataCollectionSpec {\n name: d.name,\n spec: serde_json::from_value(d.spec)?,\n key_fields_schema: d.key_fields_schema,\n value_fields_schema: d.value_fields_schema,\n index_options: d.index_options,\n })\n })\n .collect::>>()?,\n declarations\n .into_iter()\n .map(|d| anyhow::Ok(serde_json::from_value(d)?))\n .collect::>>()?,\n context,\n )\n .await?;\n\n let data_coll_output = data_coll_output\n .into_iter()\n .map(|d| {\n Ok(interface::ExportDataCollectionBuildOutput {\n export_context: async move {\n Ok(d.export_context.await? as Arc)\n }\n .boxed(),\n setup_key: serde_json::to_value(d.setup_key)?,\n desired_setup_state: serde_json::to_value(d.desired_setup_state)?,\n })\n })\n .collect::>>()?;\n let decl_output = decl_output\n .into_iter()\n .map(|(key, state)| Ok((serde_json::to_value(key)?, serde_json::to_value(state)?)))\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result> {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n let desired_state: Option = desired_state\n .map(|v| serde_json::from_value(v.clone()))\n .transpose()?;\n let existing_states = from_json_combined_state(existing_states)?;\n let setup_status = StorageFactoryBase::check_setup_status(\n self,\n key,\n desired_state,\n existing_states,\n flow_instance_ctx,\n )\n .await?;\n Ok(Box::new(setup_status))\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> 
Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n StorageFactoryBase::describe_resource(self, &key)\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n Ok(serde_json::to_value(key)?)\n }\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result {\n let result = StorageFactoryBase::check_state_compatibility(\n self,\n &serde_json::from_value(desired_state.clone())?,\n &serde_json::from_value(existing_state.clone())?,\n )?;\n Ok(result)\n }\n\n fn extract_additional_key(\n &self,\n key: &value::KeyValue,\n value: &value::FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n StorageFactoryBase::extract_additional_key(\n self,\n key,\n value,\n export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mutations = mutations\n .into_iter()\n .map(|m| {\n anyhow::Ok(ExportTargetMutationWithContext {\n mutation: m.mutation,\n export_context: m\n .export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>()?;\n StorageFactoryBase::apply_mutation(self, mutations).await\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n StorageFactoryBase::apply_setup_changes(\n self,\n setup_status\n .into_iter()\n .map(|item| -> anyhow::Result<_> {\n Ok(TypedResourceSetupChangeItem {\n key: serde_json::from_value(item.key.clone())?,\n setup_status: (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>>()?,\n context,\n )\n .await\n }\n}\nfn from_json_combined_state(\n existing_states: setup::CombinedState,\n) -> Result> {\n Ok(setup::CombinedState {\n current: existing_states\n .current\n .map(|v| serde_json::from_value(v))\n 
.transpose()?,\n staging: existing_states\n .staging\n .into_iter()\n .map(|v| {\n anyhow::Ok(match v {\n setup::StateChange::Upsert(v) => {\n setup::StateChange::Upsert(serde_json::from_value(v)?)\n }\n setup::StateChange::Delete => setup::StateChange::Delete,\n })\n })\n .collect::>()?,\n legacy_state_key: existing_states.legacy_state_key,\n })\n}\n"], ["/cocoindex/src/py/mod.rs", "use crate::execution::evaluator::evaluate_transient_flow;\nuse crate::prelude::*;\n\nuse crate::base::schema::{FieldSchema, ValueType};\nuse crate::base::spec::{NamedSpec, OutputMode, ReactiveOpSpec, SpecFormatter};\nuse crate::lib_context::{clear_lib_context, get_auth_registry, init_lib_context};\nuse crate::ops::py_factory::{PyExportTargetFactory, PyOpArgSchema};\nuse crate::ops::{interface::ExecutorFactory, py_factory::PyFunctionFactory, register_factory};\nuse crate::server::{self, ServerSettings};\nuse crate::settings::Settings;\nuse crate::setup::{self};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::fmt::Write;\nuse std::sync::Arc;\n\nmod convert;\npub(crate) use convert::*;\n\npub struct PythonExecutionContext {\n pub event_loop: Py,\n}\n\nimpl PythonExecutionContext {\n pub fn new(_py: Python<'_>, event_loop: Py) -> Self {\n Self { event_loop }\n }\n}\n\npub trait ToResultWithPyTrace {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result;\n}\n\nimpl ToResultWithPyTrace for Result {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => {\n let mut err_str = format!(\"Error calling Python function: {err}\");\n if let Some(tb) = err.traceback(py) {\n write!(&mut err_str, \"\\n{}\", tb.format()?)?;\n }\n Err(anyhow::anyhow!(err_str))\n }\n }\n }\n}\npub trait IntoPyResult {\n fn into_py_result(self) -> PyResult;\n}\n\nimpl IntoPyResult for Result {\n fn into_py_result(self) -> PyResult {\n match self {\n 
Ok(value) => Ok(value),\n Err(err) => Err(PyException::new_err(format!(\"{err:?}\"))),\n }\n }\n}\n\n#[pyfunction]\nfn init(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n init_lib_context(settings.into_inner())?;\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn start_server(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n let server = get_runtime().block_on(server::init_server(\n get_lib_context()?,\n settings.into_inner(),\n ))?;\n get_runtime().spawn(server);\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn stop(py: Python<'_>) -> PyResult<()> {\n py.allow_threads(clear_lib_context);\n Ok(())\n}\n\n#[pyfunction]\nfn register_function_factory(name: String, py_function_factory: Py) -> PyResult<()> {\n let factory = PyFunctionFactory {\n py_function_factory,\n };\n register_factory(name, ExecutorFactory::SimpleFunction(Arc::new(factory))).into_py_result()\n}\n\n#[pyfunction]\nfn register_target_connector(name: String, py_target_connector: Py) -> PyResult<()> {\n let factory = PyExportTargetFactory {\n py_target_connector,\n };\n register_factory(name, ExecutorFactory::ExportTarget(Arc::new(factory))).into_py_result()\n}\n\n#[pyclass]\npub struct IndexUpdateInfo(pub execution::stats::IndexUpdateInfo);\n\n#[pymethods]\nimpl IndexUpdateInfo {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.0)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\n#[pyclass]\npub struct Flow(pub Arc);\n\n/// A single line in the rendered spec, with hierarchical children\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpecLine {\n /// The formatted content of the line (e.g., \"Import: name=documents, source=LocalFile\")\n pub content: String,\n /// Child lines in the hierarchy\n pub children: Vec,\n}\n\n/// A rendered specification, grouped by sections\n#[pyclass(get_all, 
set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpec {\n /// List of (section_name, lines) pairs\n pub sections: Vec<(String, Vec)>,\n}\n\n#[pyclass]\npub struct FlowLiveUpdaterUpdates(execution::FlowLiveUpdaterUpdates);\n\n#[pymethods]\nimpl FlowLiveUpdaterUpdates {\n #[getter]\n pub fn active_sources(&self) -> Vec {\n self.0.active_sources.clone()\n }\n\n #[getter]\n pub fn updated_sources(&self) -> Vec {\n self.0.updated_sources.clone()\n }\n}\n\n#[pyclass]\npub struct FlowLiveUpdater(pub Arc);\n\n#[pymethods]\nimpl FlowLiveUpdater {\n #[staticmethod]\n pub fn create<'py>(\n py: Python<'py>,\n flow: &Flow,\n options: Pythonized,\n ) -> PyResult> {\n let flow = flow.0.clone();\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow,\n lib_context.require_builtin_db_pool().into_py_result()?,\n options.into_inner(),\n )\n .await\n .into_py_result()?;\n Ok(Self(Arc::new(live_updater)))\n })\n }\n\n pub fn wait_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(\n py,\n async move { live_updater.wait().await.into_py_result() },\n )\n }\n\n pub fn next_status_updates_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(py, async move {\n let updates = live_updater.next_status_updates().await.into_py_result()?;\n Ok(FlowLiveUpdaterUpdates(updates))\n })\n }\n\n pub fn abort(&self) {\n self.0.abort();\n }\n\n pub fn index_update_info(&self) -> IndexUpdateInfo {\n IndexUpdateInfo(self.0.index_update_info())\n }\n}\n\n#[pymethods]\nimpl Flow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.flow.flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn name(&self) -> &str {\n &self.0.flow.flow_instance.name\n }\n\n pub fn evaluate_and_dump(\n &self,\n py: Python<'_>,\n 
options: Pythonized,\n ) -> PyResult<()> {\n py.allow_threads(|| {\n get_runtime()\n .block_on(async {\n let exec_plan = self.0.flow.get_execution_plan().await?;\n let lib_context = get_lib_context()?;\n let execution_ctx = self.0.use_execution_ctx().await?;\n execution::dumper::evaluate_and_dump(\n &exec_plan,\n &execution_ctx.setup_execution_context,\n &self.0.flow.data_schema,\n options.into_inner(),\n lib_context.require_builtin_db_pool()?,\n )\n .await\n })\n .into_py_result()?;\n Ok(())\n })\n }\n\n #[pyo3(signature = (output_mode=None))]\n pub fn get_spec(&self, output_mode: Option>) -> PyResult {\n let mode = output_mode.map_or(OutputMode::Concise, |m| m.into_inner());\n let spec = &self.0.flow.flow_instance;\n let mut sections: IndexMap> = IndexMap::new();\n\n // Sources\n sections.insert(\n \"Source\".to_string(),\n spec.import_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Import: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Processing\n fn walk(op: &NamedSpec, mode: OutputMode) -> RenderedSpecLine {\n let content = format!(\"{}: {}\", op.name, op.spec.format(mode));\n\n let children = match &op.spec {\n ReactiveOpSpec::ForEach(fe) => fe\n .op_scope\n .ops\n .iter()\n .map(|nested| walk(nested, mode))\n .collect(),\n _ => vec![],\n };\n\n RenderedSpecLine { content, children }\n }\n\n sections.insert(\n \"Processing\".to_string(),\n spec.reactive_ops.iter().map(|op| walk(op, mode)).collect(),\n );\n\n // Targets\n sections.insert(\n \"Targets\".to_string(),\n spec.export_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Export: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Declarations\n sections.insert(\n \"Declarations\".to_string(),\n spec.declarations\n .iter()\n .map(|decl| RenderedSpecLine {\n content: format!(\"Declaration: {}\", decl.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n 
Ok(RenderedSpec {\n sections: sections.into_iter().collect(),\n })\n }\n\n pub fn get_schema(&self) -> Vec<(String, String, String)> {\n let schema = &self.0.flow.data_schema;\n let mut result = Vec::new();\n\n fn process_fields(\n fields: &[FieldSchema],\n prefix: &str,\n result: &mut Vec<(String, String, String)>,\n ) {\n for field in fields {\n let field_name = format!(\"{}{}\", prefix, field.name);\n\n let mut field_type = match &field.value_type.typ {\n ValueType::Basic(basic) => format!(\"{basic}\"),\n ValueType::Table(t) => format!(\"{}\", t.kind),\n ValueType::Struct(_) => \"Struct\".to_string(),\n };\n\n if field.value_type.nullable {\n field_type.push('?');\n }\n\n let attr_str = if field.value_type.attrs.is_empty() {\n String::new()\n } else {\n field\n .value_type\n .attrs\n .keys()\n .map(|k| k.to_string())\n .collect::>()\n .join(\", \")\n };\n\n result.push((field_name.clone(), field_type, attr_str));\n\n match &field.value_type.typ {\n ValueType::Struct(s) => {\n process_fields(&s.fields, &format!(\"{field_name}.\"), result);\n }\n ValueType::Table(t) => {\n process_fields(&t.row.fields, &format!(\"{field_name}[].\"), result);\n }\n ValueType::Basic(_) => {}\n }\n }\n }\n\n process_fields(&schema.schema.fields, \"\", &mut result);\n result\n }\n\n pub fn make_setup_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n\n pub fn make_drop_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n}\n\n#[pyclass]\npub struct TransientFlow(pub Arc);\n\n#[pymethods]\nimpl TransientFlow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.transient_flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String 
{\n self.__str__()\n }\n\n pub fn evaluate_async<'py>(\n &self,\n py: Python<'py>,\n args: Vec>,\n ) -> PyResult> {\n let flow = self.0.clone();\n let input_values: Vec = std::iter::zip(\n self.0.transient_flow_instance.input_fields.iter(),\n args.into_iter(),\n )\n .map(|(input_schema, arg)| value_from_py_object(&input_schema.value_type.typ, &arg))\n .collect::>()?;\n\n future_into_py(py, async move {\n let result = evaluate_transient_flow(&flow, &input_values)\n .await\n .into_py_result()?;\n Python::with_gil(|py| value_to_py_object(py, &result)?.into_py_any(py))\n })\n }\n}\n\n#[pyclass]\npub struct SetupChangeBundle(Arc);\n\n#[pymethods]\nimpl SetupChangeBundle {\n pub fn describe_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n future_into_py(py, async move {\n bundle.describe(&lib_context).await.into_py_result()\n })\n }\n\n pub fn apply_async<'py>(\n &self,\n py: Python<'py>,\n report_to_stdout: bool,\n ) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n\n future_into_py(py, async move {\n let mut stdout = None;\n let mut sink = None;\n bundle\n .apply(\n &lib_context,\n if report_to_stdout {\n stdout.insert(std::io::stdout())\n } else {\n sink.insert(std::io::sink())\n },\n )\n .await\n .into_py_result()\n })\n }\n}\n\n#[pyfunction]\nfn flow_names_with_setup_async(py: Python<'_>) -> PyResult> {\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let setup_ctx = lib_context\n .require_persistence_ctx()\n .into_py_result()?\n .setup_ctx\n .read()\n .await;\n let flow_names: Vec = setup_ctx.all_setup_states.flows.keys().cloned().collect();\n PyResult::Ok(flow_names)\n })\n}\n\n#[pyfunction]\nfn make_setup_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names,\n };\n 
Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn make_drop_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn remove_flow_context(flow_name: String) {\n let lib_context_locked = crate::lib_context::LIB_CONTEXT.read().unwrap();\n if let Some(lib_context) = lib_context_locked.as_ref() {\n lib_context.remove_flow_context(&flow_name)\n }\n}\n\n#[pyfunction]\nfn add_auth_entry(key: String, value: Pythonized) -> PyResult<()> {\n get_auth_registry()\n .add(key, value.into_inner())\n .into_py_result()?;\n Ok(())\n}\n\n#[pyfunction]\nfn seder_roundtrip<'py>(\n py: Python<'py>,\n value: Bound<'py, PyAny>,\n typ: Pythonized,\n) -> PyResult> {\n let typ = typ.into_inner();\n let value = value_from_py_object(&typ, &value)?;\n let value = value::test_util::seder_roundtrip(&value, &typ).into_py_result()?;\n value_to_py_object(py, &value)\n}\n\n/// A Python module implemented in Rust.\n#[pymodule]\n#[pyo3(name = \"_engine\")]\nfn cocoindex_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {\n m.add_function(wrap_pyfunction!(init, m)?)?;\n m.add_function(wrap_pyfunction!(start_server, m)?)?;\n m.add_function(wrap_pyfunction!(stop, m)?)?;\n m.add_function(wrap_pyfunction!(register_function_factory, m)?)?;\n m.add_function(wrap_pyfunction!(register_target_connector, m)?)?;\n m.add_function(wrap_pyfunction!(flow_names_with_setup_async, m)?)?;\n m.add_function(wrap_pyfunction!(make_setup_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(make_drop_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(remove_flow_context, m)?)?;\n m.add_function(wrap_pyfunction!(add_auth_entry, m)?)?;\n\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n 
m.add_class::()?;\n\n let testutil_module = PyModule::new(m.py(), \"testutil\")?;\n testutil_module.add_function(wrap_pyfunction!(seder_roundtrip, &testutil_module)?)?;\n m.add_submodule(&testutil_module)?;\n\n Ok(())\n}\n"], ["/cocoindex/src/execution/source_indexer.rs", "use crate::{\n prelude::*,\n service::error::{SharedError, SharedResult, SharedResultExt},\n};\n\nuse futures::future::Ready;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, hash_map};\nuse tokio::{sync::Semaphore, task::JoinSet};\n\nuse super::{\n db_tracking,\n evaluator::SourceRowEvaluationContext,\n row_indexer::{self, SkippedOr, SourceVersion},\n stats,\n};\n\nuse crate::ops::interface;\nstruct SourceRowIndexingState {\n source_version: SourceVersion,\n processing_sem: Arc,\n touched_generation: usize,\n}\n\nimpl Default for SourceRowIndexingState {\n fn default() -> Self {\n Self {\n source_version: SourceVersion::default(),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: 0,\n }\n }\n}\n\nstruct SourceIndexingState {\n rows: HashMap,\n scan_generation: usize,\n}\n\npub struct SourceIndexingContext {\n flow: Arc,\n source_idx: usize,\n pending_update: Mutex>>>>,\n update_sem: Semaphore,\n state: Mutex,\n setup_execution_ctx: Arc,\n}\n\npub const NO_ACK: Option Ready>> = None;\n\nimpl SourceIndexingContext {\n pub async fn load(\n flow: Arc,\n source_idx: usize,\n setup_execution_ctx: Arc,\n pool: &PgPool,\n ) -> Result {\n let plan = flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[source_idx];\n let mut list_state = db_tracking::ListTrackedSourceKeyMetadataState::new();\n let mut rows = HashMap::new();\n let scan_generation = 0;\n {\n let mut key_metadata_stream = list_state.list(\n setup_execution_ctx.import_ops[source_idx].source_id,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n while let Some(key_metadata) = key_metadata_stream.next().await {\n let key_metadata = key_metadata?;\n let source_key = 
value::Value::::from_json(\n key_metadata.source_key,\n &import_op.primary_key_type,\n )?\n .into_key()?;\n rows.insert(\n source_key,\n SourceRowIndexingState {\n source_version: SourceVersion::from_stored(\n key_metadata.processed_source_ordinal,\n &key_metadata.process_logic_fingerprint,\n plan.logic_fingerprint,\n ),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: scan_generation,\n },\n );\n }\n }\n Ok(Self {\n flow,\n source_idx,\n state: Mutex::new(SourceIndexingState {\n rows,\n scan_generation,\n }),\n pending_update: Mutex::new(None),\n update_sem: Semaphore::new(1),\n setup_execution_ctx,\n })\n }\n\n pub async fn process_source_key<\n AckFut: Future> + Send + 'static,\n AckFn: FnOnce() -> AckFut,\n >(\n self: Arc,\n key: value::KeyValue,\n source_data: Option,\n update_stats: Arc,\n _concur_permit: concur_control::CombinedConcurrencyControllerPermit,\n ack_fn: Option,\n pool: PgPool,\n ) {\n let process = async {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let schema = &self.flow.data_schema;\n let source_data = match source_data {\n Some(source_data) => source_data,\n None => import_op\n .executor\n .get_value(\n &key,\n &interface::SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: true,\n },\n )\n .await?\n .try_into()?,\n };\n\n let source_version = SourceVersion::from_current_data(&source_data);\n let processing_sem = {\n let mut state = self.state.lock().unwrap();\n let touched_generation = state.scan_generation;\n match state.rows.entry(key.clone()) {\n hash_map::Entry::Occupied(mut entry) => {\n if entry\n .get()\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n return anyhow::Ok(());\n }\n let sem = entry.get().processing_sem.clone();\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n entry.remove();\n } else {\n entry.get_mut().source_version = source_version.clone();\n }\n sem\n }\n 
hash_map::Entry::Vacant(entry) => {\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n update_stats.num_no_change.inc(1);\n return anyhow::Ok(());\n }\n let new_entry = SourceRowIndexingState {\n source_version: source_version.clone(),\n touched_generation,\n ..Default::default()\n };\n let sem = new_entry.processing_sem.clone();\n entry.insert(new_entry);\n sem\n }\n }\n };\n\n let _processing_permit = processing_sem.acquire().await?;\n let result = row_indexer::update_source_row(\n &SourceRowEvaluationContext {\n plan: &plan,\n import_op,\n schema,\n key: &key,\n import_op_idx: self.source_idx,\n },\n &self.setup_execution_ctx,\n source_data.value,\n &source_version,\n &pool,\n &update_stats,\n )\n .await?;\n let target_source_version = if let SkippedOr::Skipped(existing_source_version) = result\n {\n Some(existing_source_version)\n } else if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n Some(source_version)\n } else {\n None\n };\n if let Some(target_source_version) = target_source_version {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let entry = state.rows.entry(key.clone());\n match entry {\n hash_map::Entry::Occupied(mut entry) => {\n if !entry\n .get()\n .source_version\n .should_skip(&target_source_version, None)\n {\n if target_source_version.kind\n == row_indexer::SourceVersionKind::NonExistence\n {\n entry.remove();\n } else {\n let mut_entry = entry.get_mut();\n mut_entry.source_version = target_source_version;\n mut_entry.touched_generation = scan_generation;\n }\n }\n }\n hash_map::Entry::Vacant(entry) => {\n if target_source_version.kind\n != row_indexer::SourceVersionKind::NonExistence\n {\n entry.insert(SourceRowIndexingState {\n source_version: target_source_version,\n touched_generation: scan_generation,\n ..Default::default()\n });\n }\n }\n }\n }\n anyhow::Ok(())\n };\n let process_and_ack = async {\n process.await?;\n if let Some(ack_fn) = 
ack_fn {\n ack_fn().await?;\n }\n anyhow::Ok(())\n };\n if let Err(e) = process_and_ack.await {\n update_stats.num_errors.inc(1);\n error!(\n \"{:?}\",\n e.context(format!(\n \"Error in processing row from source `{source}` with key: {key}\",\n source = self.flow.flow_instance.import_ops[self.source_idx].name\n ))\n );\n }\n }\n\n pub async fn update(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let pending_update_fut = {\n let mut pending_update = self.pending_update.lock().unwrap();\n if let Some(pending_update_fut) = &*pending_update {\n pending_update_fut.clone()\n } else {\n let slf = self.clone();\n let pool = pool.clone();\n let update_stats = update_stats.clone();\n let task = tokio::spawn(async move {\n {\n let _permit = slf.update_sem.acquire().await?;\n {\n let mut pending_update = slf.pending_update.lock().unwrap();\n *pending_update = None;\n }\n slf.update_once(&pool, &update_stats).await?;\n }\n anyhow::Ok(())\n });\n let pending_update_fut = async move {\n task.await\n .map_err(SharedError::from)?\n .map_err(SharedError::new)\n }\n .boxed()\n .shared();\n *pending_update = Some(pending_update_fut.clone());\n pending_update_fut\n }\n };\n pending_update_fut.await.std_result()?;\n Ok(())\n }\n\n async fn update_once(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let mut rows_stream = import_op\n .executor\n .list(&interface::SourceExecutorListOptions {\n include_ordinal: true,\n });\n let mut join_set = JoinSet::new();\n let scan_generation = {\n let mut state = self.state.lock().unwrap();\n state.scan_generation += 1;\n state.scan_generation\n };\n while let Some(row) = rows_stream.next().await {\n for row in row? 
{\n let source_version = SourceVersion::from_current_with_ordinal(\n row.ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is not available\"))?,\n );\n {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let row_state = state.rows.entry(row.key.clone()).or_default();\n row_state.touched_generation = scan_generation;\n if row_state\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n continue;\n }\n }\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n join_set.spawn(self.clone().process_source_key(\n row.key,\n None,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n let deleted_key_versions = {\n let mut deleted_key_versions = Vec::new();\n let state = self.state.lock().unwrap();\n for (key, row_state) in state.rows.iter() {\n if row_state.touched_generation < scan_generation {\n deleted_key_versions.push((key.clone(), row_state.source_version.ordinal));\n }\n }\n deleted_key_versions\n };\n for (key, source_ordinal) in deleted_key_versions {\n // If the source ordinal is unavailable, call without source ordinal so that another polling will be triggered to avoid out-of-order.\n let source_data = source_ordinal\n .is_available()\n .then(|| interface::SourceData {\n value: interface::SourceValue::NonExistence,\n ordinal: source_ordinal,\n });\n let concur_permit = import_op.concurrency_controller.acquire(Some(|| 0)).await?;\n join_set.spawn(self.clone().process_source_key(\n key,\n source_data,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n Ok(())\n }\n}\n"], 
["/cocoindex/src/ops/sources/google_drive.rs", "use chrono::Duration;\nuse google_drive3::{\n DriveHub,\n api::{File, Scope},\n yup_oauth2::{ServiceAccountAuthenticator, read_service_account_key},\n};\nuse http_body_util::BodyExt;\nuse hyper_rustls::HttpsConnector;\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse phf::phf_map;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\nstruct ExportMimeType {\n text: &'static str,\n binary: &'static str,\n}\n\nconst FOLDER_MIME_TYPE: &str = \"application/vnd.google-apps.folder\";\nconst FILE_MIME_TYPE: &str = \"application/vnd.google-apps.file\";\nstatic EXPORT_MIME_TYPES: phf::Map<&'static str, ExportMimeType> = phf_map! {\n \"application/vnd.google-apps.document\" =>\n ExportMimeType {\n text: \"text/markdown\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.spreadsheet\" =>\n ExportMimeType {\n text: \"text/csv\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.presentation\" =>\n ExportMimeType {\n text: \"text/plain\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.drawing\" =>\n ExportMimeType {\n text: \"image/svg+xml\",\n binary: \"image/png\",\n },\n \"application/vnd.google-apps.script\" =>\n ExportMimeType {\n text: \"application/vnd.google-apps.script+json\",\n binary: \"application/vnd.google-apps.script+json\",\n },\n};\n\nfn is_supported_file_type(mime_type: &str) -> bool {\n !mime_type.starts_with(\"application/vnd.google-apps.\")\n || EXPORT_MIME_TYPES.contains_key(mime_type)\n || mime_type == FILE_MIME_TYPE\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n service_account_credential_path: String,\n binary: bool,\n root_folder_ids: Vec,\n recent_changes_poll_interval: Option,\n}\n\nstruct Executor {\n drive_hub: DriveHub>,\n binary: bool,\n root_folder_ids: IndexSet>,\n recent_updates_poll_interval: Option,\n}\n\nimpl Executor {\n async fn new(spec: Spec) -> Result {\n let service_account_key =\n 
read_service_account_key(spec.service_account_credential_path).await?;\n let auth = ServiceAccountAuthenticator::builder(service_account_key)\n .build()\n .await?;\n let client =\n hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())\n .build(\n hyper_rustls::HttpsConnectorBuilder::new()\n .with_provider_and_native_roots(\n rustls::crypto::aws_lc_rs::default_provider(),\n )?\n .https_only()\n .enable_http2()\n .build(),\n );\n let drive_hub = DriveHub::new(client, auth);\n Ok(Self {\n drive_hub,\n binary: spec.binary,\n root_folder_ids: spec.root_folder_ids.into_iter().map(Arc::from).collect(),\n recent_updates_poll_interval: spec.recent_changes_poll_interval,\n })\n }\n}\n\nfn escape_string(s: &str) -> String {\n let mut escaped = String::with_capacity(s.len());\n for c in s.chars() {\n match c {\n '\\'' | '\\\\' => escaped.push('\\\\'),\n _ => {}\n }\n escaped.push(c);\n }\n escaped\n}\n\nconst CUTOFF_TIME_BUFFER: Duration = Duration::seconds(1);\nimpl Executor {\n fn visit_file(\n &self,\n file: File,\n new_folder_ids: &mut Vec>,\n seen_ids: &mut HashSet>,\n ) -> Result> {\n if file.trashed == Some(true) {\n return Ok(None);\n }\n let (id, mime_type) = match (file.id, file.mime_type) {\n (Some(id), Some(mime_type)) => (Arc::::from(id), mime_type),\n (id, mime_type) => {\n warn!(\"Skipping file with incomplete metadata: id={id:?}, mime_type={mime_type:?}\",);\n return Ok(None);\n }\n };\n if !seen_ids.insert(id.clone()) {\n return Ok(None);\n }\n let result = if mime_type == FOLDER_MIME_TYPE {\n new_folder_ids.push(id);\n None\n } else if is_supported_file_type(&mime_type) {\n Some(PartialSourceRowMetadata {\n key: KeyValue::Str(id),\n ordinal: file.modified_time.map(|t| t.try_into()).transpose()?,\n })\n } else {\n None\n };\n Ok(result)\n }\n\n async fn list_files(\n &self,\n folder_id: &str,\n fields: &str,\n next_page_token: &mut Option,\n ) -> Result> {\n let query = format!(\"'{}' in parents\", escape_string(folder_id));\n let 
mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .q(&query)\n .param(\"fields\", fields);\n if let Some(next_page_token) = &next_page_token {\n list_call = list_call.page_token(next_page_token);\n }\n let (_, files) = list_call.doit().await?;\n *next_page_token = files.next_page_token;\n let file_iter = files.files.into_iter().flat_map(|file| file.into_iter());\n Ok(file_iter)\n }\n\n fn make_cutoff_time(\n most_recent_modified_time: Option>,\n list_start_time: DateTime,\n ) -> DateTime {\n let safe_upperbound = list_start_time - CUTOFF_TIME_BUFFER;\n most_recent_modified_time\n .map(|t| t.min(safe_upperbound))\n .unwrap_or(safe_upperbound)\n }\n\n async fn get_recent_updates(\n &self,\n cutoff_time: &mut DateTime,\n ) -> Result {\n let mut page_size: i32 = 10;\n let mut next_page_token: Option = None;\n let mut changes = Vec::new();\n let mut most_recent_modified_time = None;\n let start_time = Utc::now();\n 'paginate: loop {\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"files(id,modifiedTime,parents,trashed)\")\n .order_by(\"modifiedTime desc\")\n .page_size(page_size);\n if let Some(token) = next_page_token {\n list_call = list_call.page_token(token.as_str());\n }\n let (_, files) = list_call.doit().await?;\n for file in files.files.into_iter().flat_map(|files| files.into_iter()) {\n let modified_time = file.modified_time.unwrap_or_default();\n if most_recent_modified_time.is_none() {\n most_recent_modified_time = Some(modified_time);\n }\n if modified_time <= *cutoff_time {\n break 'paginate;\n }\n let file_id = file.id.ok_or_else(|| anyhow!(\"File has no id\"))?;\n if self.is_file_covered(&file_id).await? 
{\n changes.push(SourceChange {\n key: KeyValue::Str(Arc::from(file_id)),\n data: None,\n });\n }\n }\n if let Some(token) = files.next_page_token {\n next_page_token = Some(token);\n } else {\n break;\n }\n // List more in a page since 2nd.\n page_size = 100;\n }\n *cutoff_time = Self::make_cutoff_time(most_recent_modified_time, start_time);\n Ok(SourceChangeMessage {\n changes,\n ack_fn: None,\n })\n }\n\n async fn is_file_covered(&self, file_id: &str) -> Result {\n let mut next_file_id = Some(Cow::Borrowed(file_id));\n while let Some(file_id) = next_file_id {\n if self.root_folder_ids.contains(file_id.as_ref()) {\n return Ok(true);\n }\n let (_, file) = self\n .drive_hub\n .files()\n .get(&file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"parents\")\n .doit()\n .await?;\n next_file_id = file\n .parents\n .into_iter()\n .flat_map(|parents| parents.into_iter())\n .map(Cow::Owned)\n .next();\n }\n Ok(false)\n }\n}\n\ntrait ResultExt {\n type OptResult;\n fn or_not_found(self) -> Self::OptResult;\n}\n\nimpl ResultExt for google_drive3::Result {\n type OptResult = google_drive3::Result>;\n\n fn or_not_found(self) -> Self::OptResult {\n match self {\n Ok(value) => Ok(Some(value)),\n Err(google_drive3::Error::BadRequest(err_msg))\n if err_msg\n .get(\"error\")\n .and_then(|e| e.get(\"code\"))\n .and_then(|code| code.as_i64())\n == Some(404) =>\n {\n Ok(None)\n }\n Err(e) => Err(e),\n }\n }\n}\n\nfn optional_modified_time(include_ordinal: bool) -> &'static str {\n if include_ordinal { \",modifiedTime\" } else { \"\" }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let mut seen_ids = HashSet::new();\n let mut folder_ids = self.root_folder_ids.clone();\n let fields = format!(\n \"files(id,name,mimeType,trashed{})\",\n optional_modified_time(options.include_ordinal)\n );\n let mut new_folder_ids = Vec::new();\n try_stream! 
{\n while let Some(folder_id) = folder_ids.pop() {\n let mut next_page_token = None;\n loop {\n let mut curr_rows = Vec::new();\n let files = self\n .list_files(&folder_id, &fields, &mut next_page_token)\n .await?;\n for file in files {\n curr_rows.extend(self.visit_file(file, &mut new_folder_ids, &mut seen_ids)?);\n }\n if !curr_rows.is_empty() {\n yield curr_rows;\n }\n if next_page_token.is_none() {\n break;\n }\n }\n folder_ids.extend(new_folder_ids.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let file_id = key.str_value()?;\n let fields = format!(\n \"id,name,mimeType,trashed{}\",\n optional_modified_time(options.include_ordinal)\n );\n let resp = self\n .drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", &fields)\n .doit()\n .await\n .or_not_found()?;\n let file = match resp {\n Some((_, file)) if file.trashed != Some(true) => file,\n _ => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n let ordinal = if options.include_ordinal {\n file.modified_time.map(|t| t.try_into()).transpose()?\n } else {\n None\n };\n let type_n_body = if let Some(export_mime_type) = file\n .mime_type\n .as_ref()\n .and_then(|mime_type| EXPORT_MIME_TYPES.get(mime_type.as_str()))\n {\n let target_mime_type = if self.binary {\n export_mime_type.binary\n } else {\n export_mime_type.text\n };\n self.drive_hub\n .files()\n .export(file_id, target_mime_type)\n .add_scope(Scope::Readonly)\n .doit()\n .await\n .or_not_found()?\n .map(|content| (Some(target_mime_type.to_string()), content.into_body()))\n } else {\n self.drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"alt\", \"media\")\n .doit()\n .await\n .or_not_found()?\n .map(|(resp, _)| (file.mime_type, resp.into_body()))\n };\n let value = match type_n_body {\n Some((mime_type, 
resp_body)) => {\n let content = resp_body.collect().await?;\n\n let fields = vec![\n file.name.unwrap_or_default().into(),\n mime_type.into(),\n if self.binary {\n content.to_bytes().to_vec().into()\n } else {\n String::from_utf8_lossy(&content.to_bytes())\n .to_string()\n .into()\n },\n ];\n Some(SourceValue::Existence(FieldValues { fields }))\n }\n None => None,\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let poll_interval = if let Some(poll_interval) = self.recent_updates_poll_interval {\n poll_interval\n } else {\n return Ok(None);\n };\n let mut cutoff_time = Utc::now() - CUTOFF_TIME_BUFFER;\n let mut interval = tokio::time::interval(poll_interval);\n interval.tick().await;\n let stream = stream! {\n loop {\n interval.tick().await;\n yield self.get_recent_updates(&mut cutoff_time).await;\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"GoogleDrive\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"file_id\",\n make_output_type(BasicValueType::Str),\n ));\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n let mime_type_field = schema_builder.add_field(FieldSchema::new(\n \"mime_type\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n )\n .with_attr(\n field_attrs::CONTENT_MIME_TYPE,\n 
serde_json::to_value(mime_type_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec).await?))\n }\n}\n"], ["/cocoindex/src/ops/interface.rs", "use std::time::SystemTime;\n\nuse crate::base::{schema::*, spec::IndexOptions, value::*};\nuse crate::prelude::*;\nuse crate::setup;\nuse chrono::TimeZone;\nuse serde::Serialize;\n\npub struct FlowInstanceContext {\n pub flow_instance_name: String,\n pub auth_registry: Arc,\n pub py_exec_ctx: Option>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Default)]\npub struct Ordinal(pub Option);\n\nimpl Ordinal {\n pub fn unavailable() -> Self {\n Self(None)\n }\n\n pub fn is_available(&self) -> bool {\n self.0.is_some()\n }\n}\n\nimpl From for Option {\n fn from(val: Ordinal) -> Self {\n val.0\n }\n}\n\nimpl TryFrom for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: SystemTime) -> Result {\n let duration = time.duration_since(std::time::UNIX_EPOCH)?;\n Ok(Ordinal(Some(duration.as_micros().try_into()?)))\n }\n}\n\nimpl TryFrom> for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: chrono::DateTime) -> Result {\n Ok(Ordinal(Some(time.timestamp_micros())))\n }\n}\n\npub struct PartialSourceRowMetadata {\n pub key: KeyValue,\n pub ordinal: Option,\n}\n\n#[derive(Debug)]\npub enum SourceValue {\n Existence(FieldValues),\n NonExistence,\n}\n\nimpl SourceValue {\n pub fn is_existent(&self) -> bool {\n matches!(self, Self::Existence(_))\n }\n\n pub fn as_optional(&self) -> Option<&FieldValues> {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n\n pub fn into_optional(self) -> Option {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n}\n\npub struct SourceData {\n pub value: SourceValue,\n pub ordinal: 
Ordinal,\n}\n\npub struct SourceChange {\n pub key: KeyValue,\n\n /// If None, the engine will poll to get the latest existence state and value.\n pub data: Option,\n}\n\npub struct SourceChangeMessage {\n pub changes: Vec,\n pub ack_fn: Option BoxFuture<'static, Result<()>> + Send + Sync>>,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorListOptions {\n pub include_ordinal: bool,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorGetOptions {\n pub include_ordinal: bool,\n pub include_value: bool,\n}\n\n#[derive(Debug)]\npub struct PartialSourceRowData {\n pub value: Option,\n pub ordinal: Option,\n}\n\nimpl TryFrom for SourceData {\n type Error = anyhow::Error;\n\n fn try_from(data: PartialSourceRowData) -> Result {\n Ok(Self {\n value: data\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value is missing\"))?,\n ordinal: data\n .ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is missing\"))?,\n })\n }\n}\n#[async_trait]\npub trait SourceExecutor: Send + Sync {\n /// Get the list of keys for the source.\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>>;\n\n // Get the value for the given key.\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result;\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n Ok(None)\n }\n}\n\n#[async_trait]\npub trait SourceFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[async_trait]\npub trait SimpleFunctionExecutor: Send + Sync {\n /// Evaluate the operation.\n async fn evaluate(&self, args: Vec) -> Result;\n\n fn enable_cache(&self) -> bool {\n false\n }\n\n /// Must be Some if `enable_cache` is true.\n /// If it changes, the cache will be invalidated.\n fn behavior_version(&self) -> Option {\n None\n }\n}\n\n#[async_trait]\npub trait SimpleFunctionFactory {\n async fn build(\n self: Arc,\n spec: 
serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[derive(Debug)]\npub struct ExportTargetUpsertEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n pub value: FieldValues,\n}\n\n#[derive(Debug)]\npub struct ExportTargetDeleteEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Debug, Default)]\npub struct ExportTargetMutation {\n pub upserts: Vec,\n pub deletes: Vec,\n}\n\nimpl ExportTargetMutation {\n pub fn is_empty(&self) -> bool {\n self.upserts.is_empty() && self.deletes.is_empty()\n }\n}\n\n#[derive(Debug)]\npub struct ExportTargetMutationWithContext<'ctx, T: ?Sized + Send + Sync> {\n pub mutation: ExportTargetMutation,\n pub export_context: &'ctx T,\n}\n\npub struct ResourceSetupChangeItem<'a> {\n pub key: &'a serde_json::Value,\n pub setup_status: &'a dyn setup::ResourceSetupStatus,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum SetupStateCompatibility {\n /// The resource is fully compatible with the desired state.\n /// This means the resource can be updated to the desired state without any loss of data.\n Compatible,\n /// The resource is partially compatible with the desired state.\n /// This means data from some existing fields will be lost after applying the setup change.\n /// But at least their key fields of all rows are still preserved.\n PartialCompatible,\n /// The resource needs to be rebuilt. 
After applying the setup change, all data will be gone.\n NotCompatible,\n}\n\npub struct ExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n}\n\npub struct ExportDataCollectionSpec {\n pub name: String,\n pub spec: serde_json::Value,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\n#[async_trait]\npub trait ExportTargetFactory: Send + Sync {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )>;\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n context: Arc,\n ) -> Result>;\n\n /// Normalize the key. e.g. the JSON format may change (after code change, e.g. 
new optional field or field ordering), even if the underlying value is not changed.\n /// This should always return the canonical serialized form.\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result;\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result;\n\n fn extract_additional_key(\n &self,\n key: &KeyValue,\n value: &FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result;\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[derive(Clone)]\npub enum ExecutorFactory {\n Source(Arc),\n SimpleFunction(Arc),\n ExportTarget(Arc),\n}\n"], ["/cocoindex/src/lib_context.rs", "use crate::prelude::*;\n\nuse crate::builder::AnalyzedFlow;\nuse crate::execution::source_indexer::SourceIndexingContext;\nuse crate::service::error::ApiError;\nuse crate::settings;\nuse crate::setup::ObjectSetupStatus;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\nuse sqlx::postgres::PgConnectOptions;\nuse tokio::runtime::Runtime;\n\npub struct FlowExecutionContext {\n pub setup_execution_context: Arc,\n pub setup_status: setup::FlowSetupStatus,\n source_indexing_contexts: Vec>>,\n}\n\nasync fn build_setup_context(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result<(\n Arc,\n setup::FlowSetupStatus,\n)> {\n let setup_execution_context = Arc::new(exec_ctx::build_flow_setup_execution_context(\n &analyzed_flow.flow_instance,\n &analyzed_flow.data_schema,\n &analyzed_flow.setup_state,\n existing_flow_ss,\n )?);\n\n let setup_status = setup::check_flow_setup_status(\n Some(&setup_execution_context.setup_state),\n existing_flow_ss,\n &analyzed_flow.flow_instance_ctx,\n )\n .await?;\n\n Ok((setup_execution_context, setup_status))\n}\n\nimpl 
FlowExecutionContext {\n async fn new(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n let mut source_indexing_contexts = Vec::new();\n source_indexing_contexts.resize_with(analyzed_flow.flow_instance.import_ops.len(), || {\n tokio::sync::OnceCell::new()\n });\n\n Ok(Self {\n setup_execution_context,\n setup_status,\n source_indexing_contexts,\n })\n }\n\n pub async fn update_setup_state(\n &mut self,\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result<()> {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n self.setup_execution_context = setup_execution_context;\n self.setup_status = setup_status;\n Ok(())\n }\n\n pub async fn get_source_indexing_context(\n &self,\n flow: &Arc,\n source_idx: usize,\n pool: &PgPool,\n ) -> Result<&Arc> {\n self.source_indexing_contexts[source_idx]\n .get_or_try_init(|| async move {\n anyhow::Ok(Arc::new(\n SourceIndexingContext::load(\n flow.clone(),\n source_idx,\n self.setup_execution_context.clone(),\n pool,\n )\n .await?,\n ))\n })\n .await\n }\n}\n\npub struct FlowContext {\n pub flow: Arc,\n execution_ctx: Arc>,\n}\n\nimpl FlowContext {\n pub fn flow_name(&self) -> &str {\n &self.flow.flow_instance.name\n }\n\n pub async fn new(\n flow: Arc,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let execution_ctx = Arc::new(tokio::sync::RwLock::new(\n FlowExecutionContext::new(&flow, existing_flow_ss).await?,\n ));\n Ok(Self {\n flow,\n execution_ctx,\n })\n }\n\n pub async fn use_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.read().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. 
Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub async fn use_owned_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.clone().read_owned().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub fn get_execution_ctx_for_setup(&self) -> &tokio::sync::RwLock {\n &self.execution_ctx\n }\n}\n\nstatic TOKIO_RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap());\nstatic AUTH_REGISTRY: LazyLock> = LazyLock::new(|| Arc::new(AuthRegistry::new()));\n\ntype PoolKey = (String, Option);\ntype PoolValue = Arc>;\n\n#[derive(Default)]\npub struct DbPools {\n pub pools: Mutex>,\n}\n\nimpl DbPools {\n pub async fn get_pool(&self, conn_spec: &settings::DatabaseConnectionSpec) -> Result {\n let db_pool_cell = {\n let key = (conn_spec.url.clone(), conn_spec.user.clone());\n let mut db_pools = self.pools.lock().unwrap();\n db_pools.entry(key).or_default().clone()\n };\n let pool = db_pool_cell\n .get_or_try_init(|| async move {\n let mut pg_options: PgConnectOptions = conn_spec.url.parse()?;\n if let Some(user) = &conn_spec.user {\n pg_options = pg_options.username(user);\n }\n if let Some(password) = &conn_spec.password {\n pg_options = pg_options.password(password);\n }\n let pool = PgPool::connect_with(pg_options)\n .await\n .context(\"Failed to connect to database\")?;\n anyhow::Ok(pool)\n })\n .await?;\n Ok(pool.clone())\n }\n}\n\npub struct LibSetupContext {\n pub all_setup_states: setup::AllSetupStates,\n pub global_setup_status: setup::GlobalSetupStatus,\n}\npub struct PersistenceContext {\n pub builtin_db_pool: PgPool,\n pub setup_ctx: tokio::sync::RwLock,\n}\n\npub struct LibContext {\n pub db_pools: DbPools,\n pub persistence_ctx: Option,\n pub flows: Mutex>>,\n\n pub global_concurrency_controller: 
Arc,\n}\n\nimpl LibContext {\n pub fn get_flow_context(&self, flow_name: &str) -> Result> {\n let flows = self.flows.lock().unwrap();\n let flow_ctx = flows\n .get(flow_name)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"Flow instance not found: {flow_name}\"),\n StatusCode::NOT_FOUND,\n )\n })?\n .clone();\n Ok(flow_ctx)\n }\n\n pub fn remove_flow_context(&self, flow_name: &str) {\n let mut flows = self.flows.lock().unwrap();\n flows.remove(flow_name);\n }\n\n pub fn require_persistence_ctx(&self) -> Result<&PersistenceContext> {\n self.persistence_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Database is required for this operation. Please set COCOINDEX_DATABASE_URL environment variable and call cocoindex.init() with database settings.\"))\n }\n\n pub fn require_builtin_db_pool(&self) -> Result<&PgPool> {\n Ok(&self.require_persistence_ctx()?.builtin_db_pool)\n }\n}\n\npub fn get_runtime() -> &'static Runtime {\n &TOKIO_RUNTIME\n}\n\npub fn get_auth_registry() -> &'static Arc {\n &AUTH_REGISTRY\n}\n\nstatic LIB_INIT: OnceLock<()> = OnceLock::new();\npub fn create_lib_context(settings: settings::Settings) -> Result {\n LIB_INIT.get_or_init(|| {\n let _ = env_logger::try_init();\n\n pyo3_async_runtimes::tokio::init_with_runtime(get_runtime()).unwrap();\n\n let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();\n });\n\n let db_pools = DbPools::default();\n let persistence_ctx = if let Some(database_spec) = &settings.database {\n let (pool, all_setup_states) = get_runtime().block_on(async {\n let pool = db_pools.get_pool(database_spec).await?;\n let existing_ss = setup::get_existing_setup_state(&pool).await?;\n anyhow::Ok((pool, existing_ss))\n })?;\n Some(PersistenceContext {\n builtin_db_pool: pool,\n setup_ctx: tokio::sync::RwLock::new(LibSetupContext {\n global_setup_status: setup::GlobalSetupStatus::from_setup_states(&all_setup_states),\n all_setup_states,\n }),\n })\n } else {\n // No database configured\n None\n };\n\n Ok(LibContext {\n 
db_pools,\n persistence_ctx,\n flows: Mutex::new(BTreeMap::new()),\n global_concurrency_controller: Arc::new(concur_control::ConcurrencyController::new(\n &concur_control::Options {\n max_inflight_rows: settings.global_execution_options.source_max_inflight_rows,\n max_inflight_bytes: settings.global_execution_options.source_max_inflight_bytes,\n },\n )),\n })\n}\n\npub static LIB_CONTEXT: RwLock>> = RwLock::new(None);\n\npub(crate) fn init_lib_context(settings: settings::Settings) -> Result<()> {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = Some(Arc::new(create_lib_context(settings)?));\n Ok(())\n}\n\npub(crate) fn get_lib_context() -> Result> {\n let lib_context_locked = LIB_CONTEXT.read().unwrap();\n lib_context_locked\n .as_ref()\n .cloned()\n .ok_or_else(|| anyhow!(\"CocoIndex library is not initialized or already stopped\"))\n}\n\npub(crate) fn clear_lib_context() {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = None;\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_db_pools_default() {\n let db_pools = DbPools::default();\n assert!(db_pools.pools.lock().unwrap().is_empty());\n }\n\n #[test]\n fn test_lib_context_without_database() {\n let lib_context = create_lib_context(settings::Settings::default()).unwrap();\n assert!(lib_context.persistence_ctx.is_none());\n assert!(lib_context.require_builtin_db_pool().is_err());\n }\n\n #[test]\n fn test_persistence_context_type_safety() {\n // This test ensures that PersistenceContext groups related fields together\n let settings = settings::Settings {\n database: Some(settings::DatabaseConnectionSpec {\n url: \"postgresql://test\".to_string(),\n user: None,\n password: None,\n }),\n ..Default::default()\n };\n\n // This would fail at runtime due to invalid connection, but we're testing the structure\n let result = create_lib_context(settings);\n // We expect this to fail due to invalid connection, but the structure 
should be correct\n assert!(result.is_err());\n }\n}\n"], ["/cocoindex/src/ops/functions/split_recursively.rs", "use anyhow::anyhow;\nuse log::{error, trace};\nuse regex::{Matches, Regex};\nuse std::collections::HashSet;\nuse std::sync::LazyLock;\nuse std::{collections::HashMap, sync::Arc};\nuse unicase::UniCase;\n\nuse crate::base::field_attrs;\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Deserialize)]\nstruct CustomLanguageSpec {\n language_name: String,\n #[serde(default)]\n aliases: Vec,\n separators_regex: Vec,\n}\n\n#[derive(Deserialize)]\nstruct Spec {\n #[serde(default)]\n custom_languages: Vec,\n}\n\nconst SYNTAX_LEVEL_GAP_COST: usize = 512;\nconst MISSING_OVERLAP_COST: usize = 512;\nconst PER_LINE_BREAK_LEVEL_GAP_COST: usize = 64;\nconst TOO_SMALL_CHUNK_COST: usize = 1048576;\n\npub struct Args {\n text: ResolvedOpArg,\n chunk_size: ResolvedOpArg,\n min_chunk_size: Option,\n chunk_overlap: Option,\n language: Option,\n}\n\nstruct SimpleLanguageConfig {\n name: String,\n aliases: Vec,\n separator_regex: Vec,\n}\n\nstatic DEFAULT_LANGUAGE_CONFIG: LazyLock =\n LazyLock::new(|| SimpleLanguageConfig {\n name: \"_DEFAULT\".to_string(),\n aliases: vec![],\n separator_regex: [r\"\\n\\n+\", r\"\\n\", r\"\\s+\"]\n .into_iter()\n .map(|s| Regex::new(s).unwrap())\n .collect(),\n });\n\nstruct TreesitterLanguageConfig {\n name: String,\n tree_sitter_lang: tree_sitter::Language,\n terminal_node_kind_ids: HashSet,\n}\n\nfn add_treesitter_language<'a>(\n output: &'a mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n lang_fn: impl Into,\n terminal_node_kinds: impl IntoIterator,\n) {\n let tree_sitter_lang: tree_sitter::Language = lang_fn.into();\n let terminal_node_kind_ids = terminal_node_kinds\n .into_iter()\n .filter_map(|kind| {\n let id = tree_sitter_lang.id_for_node_kind(kind, true);\n if id != 0 {\n trace!(\"Got id for node kind: `{kind}` -> {id}\");\n Some(id)\n } else {\n 
error!(\"Failed in getting id for node kind: `{kind}`\");\n None\n }\n })\n .collect();\n\n let config = Arc::new(TreesitterLanguageConfig {\n name: name.to_string(),\n tree_sitter_lang,\n terminal_node_kind_ids,\n });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nstatic TREE_SITTER_LANGUAGE_BY_LANG: LazyLock<\n HashMap, Arc>,\n> = LazyLock::new(|| {\n let mut map = HashMap::new();\n add_treesitter_language(&mut map, \"C\", [\".c\"], tree_sitter_c::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"C++\",\n [\".cpp\", \".cc\", \".cxx\", \".h\", \".hpp\", \"cpp\"],\n tree_sitter_c::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"C#\",\n [\".cs\", \"cs\", \"csharp\"],\n tree_sitter_c_sharp::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"CSS\",\n [\".css\", \".scss\"],\n tree_sitter_css::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Fortran\",\n [\".f\", \".f90\", \".f95\", \".f03\", \"f\", \"f90\", \"f95\", \"f03\"],\n tree_sitter_fortran::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Go\",\n [\".go\", \"golang\"],\n tree_sitter_go::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"HTML\",\n [\".html\", \".htm\"],\n tree_sitter_html::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"Java\", [\".java\"], tree_sitter_java::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"JavaScript\",\n [\".js\", \"js\"],\n tree_sitter_javascript::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"JSON\", [\".json\"], tree_sitter_json::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Kotlin\",\n [\".kt\", \".kts\"],\n tree_sitter_kotlin_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Markdown\",\n [\".md\", \".mdx\", \"md\"],\n tree_sitter_md::LANGUAGE,\n [\"inline\"],\n );\n add_treesitter_language(\n 
&mut map,\n \"Pascal\",\n [\".pas\", \"pas\", \".dpr\", \"dpr\", \"Delphi\"],\n tree_sitter_pascal::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"PHP\", [\".php\"], tree_sitter_php::LANGUAGE_PHP, []);\n add_treesitter_language(\n &mut map,\n \"Python\",\n [\".py\"],\n tree_sitter_python::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"R\", [\".r\"], tree_sitter_r::LANGUAGE, []);\n add_treesitter_language(&mut map, \"Ruby\", [\".rb\"], tree_sitter_ruby::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Rust\",\n [\".rs\", \"rs\"],\n tree_sitter_rust::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Scala\",\n [\".scala\"],\n tree_sitter_scala::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"SQL\", [\".sql\"], tree_sitter_sequel::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Swift\",\n [\".swift\"],\n tree_sitter_swift::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TOML\",\n [\".toml\"],\n tree_sitter_toml_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TSX\",\n [\".tsx\"],\n tree_sitter_typescript::LANGUAGE_TSX,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TypeScript\",\n [\".ts\", \"ts\"],\n tree_sitter_typescript::LANGUAGE_TYPESCRIPT,\n [],\n );\n add_treesitter_language(&mut map, \"XML\", [\".xml\"], tree_sitter_xml::LANGUAGE_XML, []);\n add_treesitter_language(&mut map, \"DTD\", [\".dtd\"], tree_sitter_xml::LANGUAGE_DTD, []);\n add_treesitter_language(\n &mut map,\n \"YAML\",\n [\".yaml\", \".yml\"],\n tree_sitter_yaml::LANGUAGE,\n [],\n );\n map\n});\n\nenum ChunkKind<'t> {\n TreeSitterNode {\n lang_config: &'t TreesitterLanguageConfig,\n node: tree_sitter::Node<'t>,\n },\n RegexpSepChunk {\n lang_config: &'t SimpleLanguageConfig,\n next_regexp_sep_id: usize,\n },\n}\n\nstruct Chunk<'t, 's: 't> {\n full_text: &'s str,\n range: RangeValue,\n kind: ChunkKind<'t>,\n}\n\nimpl<'t, 's: 't> Chunk<'t, 's> {\n fn text(&self) -> &'s str {\n 
        self.range.extract_str(self.full_text)
    }
}

/// Splits a parent chunk's text on one of the language's separator regexps,
/// yielding the sub-ranges between matches (the separators themselves are dropped).
struct TextChunksIter<'t, 's: 't> {
    lang_config: &'t SimpleLanguageConfig,
    parent: &'t Chunk<'t, 's>,
    matches_iter: Matches<'t, 's>,
    // Which entry of `lang_config.separator_regex` is being applied.
    regexp_sep_id: usize,
    // Absolute (whole-document) start offset of the next piece; `None` once exhausted.
    next_start_pos: Option<usize>,
}

impl<'t, 's: 't> TextChunksIter<'t, 's> {
    fn new(
        lang_config: &'t SimpleLanguageConfig,
        parent: &'t Chunk<'t, 's>,
        regexp_sep_id: usize,
    ) -> Self {
        Self {
            lang_config,
            parent,
            matches_iter: lang_config.separator_regex[regexp_sep_id].find_iter(parent.text()),
            regexp_sep_id,
            next_start_pos: Some(parent.range.start),
        }
    }
}

impl<'t, 's: 't> Iterator for TextChunksIter<'t, 's> {
    type Item = Chunk<'t, 's>;

    fn next(&mut self) -> Option<Self::Item> {
        let start_pos = self.next_start_pos?;
        let end_pos = match self.matches_iter.next() {
            Some(grp) => {
                // Match offsets are relative to `parent.text()`, so shift them
                // by the parent's absolute start offset.
                self.next_start_pos = Some(self.parent.range.start + grp.end());
                self.parent.range.start + grp.start()
            }
            None => {
                // Last piece: everything up to the parent's end — unless the
                // final separator already consumed the tail (empty piece).
                self.next_start_pos = None;
                if start_pos >= self.parent.range.end {
                    return None;
                }
                self.parent.range.end
            }
        };
        Some(Chunk {
            full_text: self.parent.full_text,
            range: RangeValue::new(start_pos, end_pos),
            kind: ChunkKind::RegexpSepChunk {
                lang_config: self.lang_config,
                // Children get split with the next (finer) separator regexp.
                next_regexp_sep_id: self.regexp_sep_id + 1,
            },
        })
    }
}

/// Iterates the direct children of a tree-sitter node as chunks, emitting
/// synthetic regexp-based chunks for byte gaps between siblings (text that
/// belongs to no child node).
struct TreeSitterNodeIter<'t, 's: 't> {
    lang_config: &'t TreesitterLanguageConfig,
    full_text: &'s str,
    // Cursor positioned on the next sibling to yield; `None` when exhausted.
    cursor: Option<tree_sitter::TreeCursor<'t>>,
    next_start_pos: usize,
    end_pos: usize,
}

impl<'t, 's: 't> TreeSitterNodeIter<'t, 's> {
    /// Emits a chunk covering `[*next_start_pos, gap_end_pos)` if non-empty,
    /// advancing `next_start_pos` past the gap.
    fn fill_gap(
        next_start_pos: &mut usize,
        gap_end_pos: usize,
        full_text: &'s str,
    ) -> Option<Chunk<'t, 's>> {
        let start_pos = *next_start_pos;
        if start_pos < gap_end_pos {
            *next_start_pos = gap_end_pos;
            Some(Chunk {
                full_text,
                range: RangeValue::new(start_pos, gap_end_pos),
                kind: ChunkKind::RegexpSepChunk {
                    lang_config: &DEFAULT_LANGUAGE_CONFIG,
                    next_regexp_sep_id: 0,
                },
            })
        } else {
            None
        }
    }
}

impl<'t, 's: 't> Iterator for
/// Whitespace classification used when scoring chunk boundaries. Levels are
/// ordered (`Inline < Newline < DoubleNewline`) via the derived `Ord`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum LineBreakLevel {
    Inline,
    Newline,
    DoubleNewline,
}

impl LineBreakLevel {
    /// Numeric rank of the level: Inline = 0, Newline = 1, DoubleNewline = 2.
    /// Relies on the declaration order of the variants above.
    fn ord(self) -> usize {
        self as usize
    }
}

/// Classifies the strongest line break contained in `c`: `DoubleNewline` when
/// a newline character reappears within the run of newline characters that
/// immediately follows it (e.g. "\n\n" or "\r\n\r\n"), `Newline` when at least
/// one '\n'/'\r' occurs at all, and `Inline` otherwise.
fn line_break_level(c: &str) -> LineBreakLevel {
    let mut level = LineBreakLevel::Inline;
    let mut chars = c.chars();
    while let Some(first) = chars.next() {
        if first != '\n' && first != '\r' {
            continue;
        }
        level = LineBreakLevel::Newline;
        // Scan the run of newline characters that follows; seeing `first`
        // again within the run means a blank line.
        loop {
            match chars.next() {
                Some(follow) if follow == '\n' || follow == '\r' => {
                    if follow == first {
                        return LineBreakLevel::DoubleNewline;
                    }
                }
                // Non-newline character (consumed, which is harmless here)
                // or end of string: the run is over.
                _ => break,
            }
        }
    }
    level
}

/// Characters treated as intra-line whitespace when trimming chunk boundaries.
const INLINE_SPACE_CHARS: [char; 2] = [' ', '\t'];
Trim leading whitespaces.
        let trimmed_text = end_trimmed_text.trim_start();
        let new_start = range.start + (end_trimmed_text.len() - trimmed_text.len());
        let new_end = new_start + trimmed_text.len();

        // Align to beginning of the line if possible.
        let prev_end = self.atom_chunks.last().map_or(0, |chunk| chunk.range.end);
        let gap = &self.full_text[prev_end..new_start];
        let boundary_lb_level = line_break_level(gap);
        let range = if boundary_lb_level != LineBreakLevel::Inline {
            // The gap contains a line break: pull the start back to the line
            // start so the chunk keeps its leading indentation.
            let trimmed_gap = gap.trim_end_matches(INLINE_SPACE_CHARS);
            RangeValue::new(prev_end + trimmed_gap.len(), new_end)
        } else {
            RangeValue::new(new_start, new_end)
        };

        self.atom_chunks.push(AtomChunk {
            range,
            // Shallowest syntax level crossed since the previous atom chunk;
            // used later to score boundary quality.
            boundary_syntax_level: self.min_level,
            internal_lb_level: line_break_level(trimmed_text),
            boundary_lb_level,
        });
        self.min_level = self.curr_level;
    }

    // Finishes collection, appending an empty sentinel chunk at end-of-text so
    // the merge step always has a trailing boundary to score against.
    fn into_atom_chunks(mut self) -> Vec<AtomChunk> {
        self.atom_chunks.push(AtomChunk {
            range: RangeValue::new(self.full_text.len(), self.full_text.len()),
            boundary_syntax_level: self.min_level,
            internal_lb_level: LineBreakLevel::Inline,
            boundary_lb_level: LineBreakLevel::DoubleNewline,
        });
        self.atom_chunks
    }
}

/// Char/line/column coordinates exposed in the output (char-based, unlike the
/// byte offsets used internally).
#[derive(Debug, Clone, PartialEq, Eq)]
struct OutputPosition {
    char_offset: usize,
    line: u32,
    column: u32,
}

impl OutputPosition {
    fn into_output(self) -> value::Value {
        value::Value::Struct(fields_value!(
            self.char_offset as i64,
            self.line as i64,
            self.column as i64
        ))
    }
}

/// A byte offset plus its lazily-computed output coordinates
/// (filled in by `set_output_positions`).
struct Position {
    byte_offset: usize,
    output: Option<OutputPosition>,
}

impl Position {
    fn new(byte_offset: usize) -> Self {
        Self {
            byte_offset,
            output: None,
        }
    }
}

/// One final chunk: its start/end positions and the text slice they delimit.
struct ChunkOutput<'s> {
    start_pos: Position,
    end_pos: Position,
    text: &'s str,
}

/// Recursive splitter configured with target/min chunk sizes and overlap,
/// all measured in bytes of the input text.
struct RecursiveChunker<'s> {
    full_text: &'s str,
    chunk_size: usize,
    chunk_overlap: usize,
    min_chunk_size: usize,
}

impl<'t, 's: 't> RecursiveChunker<'s> {
    // Feeds every sub-chunk from the iterator into `atom_collector`, recursing
    // into sub-chunks that are still larger than `min_chunk_size`.
    fn collect_atom_chunks_from_iter(
        &self,
        sub_chunks_iter:
impl Iterator<Item = Chunk<'t, 's>>,
        atom_collector: &mut AtomChunksCollector<'s>,
    ) -> Result<()> {
        // Track nesting depth so each atom boundary can record the shallowest
        // syntax level it crosses.
        atom_collector.curr_level += 1;
        for sub_chunk in sub_chunks_iter {
            let range = sub_chunk.range;
            if range.len() <= self.min_chunk_size {
                // Small enough: accept as an indivisible atom.
                atom_collector.collect(range);
            } else {
                // Too large: split further (tree-sitter children or next regexp).
                self.collect_atom_chunks(sub_chunk, atom_collector)?;
            }
        }
        atom_collector.curr_level -= 1;
        if atom_collector.curr_level < atom_collector.min_level {
            atom_collector.min_level = atom_collector.curr_level;
        }
        Ok(())
    }

    // Breaks `chunk` into atom chunks. Tree-sitter nodes recurse into their
    // children (unless the node kind is configured as terminal); text chunks
    // fall back to successively finer separator regexps, collecting the range
    // as one atom once all separators are exhausted.
    fn collect_atom_chunks(
        &self,
        chunk: Chunk<'t, 's>,
        atom_collector: &mut AtomChunksCollector<'s>,
    ) -> Result<()> {
        match chunk.kind {
            ChunkKind::TreeSitterNode { lang_config, node } => {
                if !lang_config.terminal_node_kind_ids.contains(&node.kind_id()) {
                    let mut cursor = node.walk();
                    if cursor.goto_first_child() {
                        return self.collect_atom_chunks_from_iter(
                            TreeSitterNodeIter {
                                lang_config,
                                full_text: self.full_text,
                                cursor: Some(cursor),
                                next_start_pos: node.start_byte(),
                                end_pos: node.end_byte(),
                            },
                            atom_collector,
                        );
                    }
                }
                // Terminal or childless node: treat its text as plain text.
                self.collect_atom_chunks(
                    Chunk {
                        full_text: self.full_text,
                        range: chunk.range,
                        kind: ChunkKind::RegexpSepChunk {
                            lang_config: &DEFAULT_LANGUAGE_CONFIG,
                            next_regexp_sep_id: 0,
                        },
                    },
                    atom_collector,
                )
            }
            ChunkKind::RegexpSepChunk {
                lang_config,
                next_regexp_sep_id,
            } => {
                if next_regexp_sep_id >= lang_config.separator_regex.len() {
                    // No finer separator left: the whole range is one atom.
                    atom_collector.collect(chunk.range);
                    Ok(())
                } else {
                    self.collect_atom_chunks_from_iter(
                        TextChunksIter::new(lang_config, &chunk, next_regexp_sep_id),
                        atom_collector,
                    )
                }
            }
        }
    }

    // Monotonically decreasing baseline used by the merge step to compare how
    // much overlap a candidate plan still "owes"; scaled so that a distance of
    // one full `chunk_overlap` equals `MISSING_OVERLAP_COST`.
    fn get_overlap_cost_base(&self, offset: usize) -> usize {
        if self.chunk_overlap == 0 {
            0
        } else {
            (self.full_text.len() - offset) * MISSING_OVERLAP_COST / self.chunk_overlap
        }
    }

    // Merges atom chunks into output chunks close to `chunk_size`, choosing
    // boundaries by dynamic programming over per-boundary costs.
    fn merge_atom_chunks(&self, atom_chunks: Vec<AtomChunk>) -> Vec<ChunkOutput<'s>> {
        struct AtomRoutingPlan {
            start_idx: usize, // index of `atom_chunks` for the start chunk
            prev_plan_idx: usize, // index of `plans` for the previous plan
            cost: usize,
            overlap_cost_base: usize,
        }
        type PrevPlanCandidate = (std::cmp::Reverse<usize>, usize); // (cost, start_idx)

        let mut plans = Vec::with_capacity(atom_chunks.len());
        // Sentinel plan representing "nothing emitted yet" (before atom 0);
        // plans[i+1] is the best routing ending right after atom chunk i.
        plans.push(AtomRoutingPlan {
            start_idx: 0,
            prev_plan_idx: 0,
            cost: 0,
            overlap_cost_base: self.get_overlap_cost_base(0),
        });
        let mut prev_plan_candidates = std::collections::BinaryHeap::<PrevPlanCandidate>::new();

        // Memoized harmonic-style cost for skipping `gap` syntax levels at a
        // boundary: sum of SYNTAX_LEVEL_GAP_COST / i for i in 1..=gap.
        let mut gap_cost_cache = vec![0];
        let mut syntax_level_gap_cost = |boundary: usize, internal: usize| -> usize {
            if boundary > internal {
                let gap = boundary - internal;
                for i in gap_cost_cache.len()..=gap {
                    gap_cost_cache.push(gap_cost_cache[i - 1] + SYNTAX_LEVEL_GAP_COST / i);
                }
                gap_cost_cache[gap]
            } else {
                0
            }
        };

        // For each potential chunk end `i`, scan candidate starts backwards and
        // keep the minimum-cost plan.
        for (i, chunk) in atom_chunks[0..atom_chunks.len() - 1].iter().enumerate() {
            let mut min_cost = usize::MAX;
            let mut arg_min_start_idx: usize = 0;
            let mut arg_min_prev_plan_idx: usize = 0;
            let mut start_idx = i;

            let end_syntax_level = atom_chunks[i + 1].boundary_syntax_level;
            let end_lb_level = atom_chunks[i + 1].boundary_lb_level;

            let mut internal_syntax_level = usize::MAX;
            let mut internal_lb_level = LineBreakLevel::Inline;

            // Penalty when a boundary's line-break level is weaker than the
            // strongest line break inside the chunk.
            fn lb_level_gap(boundary: LineBreakLevel, internal: LineBreakLevel) -> usize {
                if boundary.ord() < internal.ord() {
                    internal.ord() - boundary.ord()
                } else {
                    0
                }
            }
            loop {
                let start_chunk = &atom_chunks[start_idx];
                let chunk_size = chunk.range.end - start_chunk.range.start;

                let mut cost = 0;
                cost +=
                    syntax_level_gap_cost(start_chunk.boundary_syntax_level, internal_syntax_level);
                cost += syntax_level_gap_cost(end_syntax_level, internal_syntax_level);
                cost += (lb_level_gap(start_chunk.boundary_lb_level, internal_lb_level)
                    + lb_level_gap(end_lb_level, internal_lb_level))
                    * PER_LINE_BREAK_LEVEL_GAP_COST;
                if chunk_size < self.min_chunk_size {
                    cost += TOO_SMALL_CHUNK_COST;
                }

                // Oversized candidate: stop scanning, but make sure at least
                // one plan exists even if every candidate is oversized.
                if chunk_size > self.chunk_size {
                    if min_cost == usize::MAX {
                        min_cost = cost + plans[start_idx].cost;
                        arg_min_start_idx = start_idx;
                        arg_min_prev_plan_idx = start_idx;
                    }
                    break;
                }

                // With overlap enabled, the previous chunk may end after this
                // one starts; pick the cheapest predecessor whose end is within
                // `chunk_overlap` bytes of our start (heap keyed by cost).
                let prev_plan_idx = if self.chunk_overlap > 0 {
                    while let Some(top_prev_plan) = prev_plan_candidates.peek() {
                        let overlap_size =
                            atom_chunks[top_prev_plan.1].range.end - start_chunk.range.start;
                        if overlap_size <= self.chunk_overlap {
                            break;
                        }
                        prev_plan_candidates.pop();
                    }
                    prev_plan_candidates.push((
                        std::cmp::Reverse(
                            plans[start_idx].cost + plans[start_idx].overlap_cost_base,
                        ),
                        start_idx,
                    ));
                    prev_plan_candidates.peek().unwrap().1
                } else {
                    start_idx
                };
                let prev_plan = &plans[prev_plan_idx];
                cost += prev_plan.cost;
                if self.chunk_overlap == 0 {
                    cost += MISSING_OVERLAP_COST / 2;
                } else {
                    let start_cost_base = self.get_overlap_cost_base(start_chunk.range.start);
                    cost += if prev_plan.overlap_cost_base < start_cost_base {
                        MISSING_OVERLAP_COST + prev_plan.overlap_cost_base - start_cost_base
                    } else {
                        MISSING_OVERLAP_COST
                    };
                }
                if cost < min_cost {
                    min_cost = cost;
                    arg_min_start_idx = start_idx;
                    arg_min_prev_plan_idx = prev_plan_idx;
                }

                if start_idx == 0 {
                    break;
                }

                start_idx -= 1;
                internal_syntax_level =
                    internal_syntax_level.min(start_chunk.boundary_syntax_level);
                internal_lb_level = internal_lb_level.max(start_chunk.internal_lb_level);
            }
            plans.push(AtomRoutingPlan {
                start_idx: arg_min_start_idx,
                prev_plan_idx: arg_min_prev_plan_idx,
                cost: min_cost,
                overlap_cost_base: self.get_overlap_cost_base(chunk.range.end),
            });
            prev_plan_candidates.clear();
        }

        // Walk the plan chain backwards from the last atom to materialize the
        // chosen chunks, then restore forward order.
        let mut output = Vec::new();
        let mut plan_idx = plans.len() - 1;
        while plan_idx > 0 {
            let plan = &plans[plan_idx];
            let start_chunk = &atom_chunks[plan.start_idx];
            let end_chunk = &atom_chunks[plan_idx - 1];
            output.push(ChunkOutput {
                start_pos: Position::new(start_chunk.range.start),
                end_pos: Position::new(end_chunk.range.end),
                text: &self.full_text[start_chunk.range.start..end_chunk.range.end],
            });
            plan_idx = plan.prev_plan_idx;
        }
        output.reverse();
        output
    }

    // Entry point: collect atom chunks for the whole text, then merge them
    // into output chunks according to the size/overlap configuration.
    fn split_root_chunk(&self, kind: ChunkKind<'t>) -> Result<Vec<ChunkOutput<'s>>> {
        let mut atom_collector = AtomChunksCollector {
            full_text: self.full_text,
            min_level: 0,
            curr_level: 0,
            atom_chunks: Vec::new(),
        };
        self.collect_atom_chunks(
            Chunk {
                full_text: self.full_text,
                range: RangeValue::new(0, self.full_text.len()),
                kind,
            },
            &mut atom_collector,
        )?;
        let atom_chunks = atom_collector.into_atom_chunks();
        let output = self.merge_atom_chunks(atom_chunks);
        Ok(output)
    }
}

/// Executor for the `SplitRecursively` function: resolved arguments plus the
/// user-supplied custom language configs, keyed case-insensitively by name/alias.
struct Executor {
    args: Args,
    custom_languages: HashMap<UniCase<String>, Arc<SimpleLanguageConfig>>,
}

impl Executor {
    // Compiles the custom languages' separator regexps and indexes each config
    // under its name and all aliases, rejecting duplicates.
    fn new(args: Args, spec: Spec) -> Result<Self> {
        let mut custom_languages = HashMap::new();
        for lang in spec.custom_languages {
            let separator_regex = lang
                .separators_regex
                .iter()
                .map(|s| Regex::new(s))
                .collect::<Result<Vec<_>, _>>()
                .with_context(|| {
                    format!(
                        "failed in parsing regexp for language `{}`",
                        lang.language_name
                    )
                })?;
            let language_config = Arc::new(SimpleLanguageConfig {
                name: lang.language_name,
                aliases: lang.aliases,
                separator_regex,
            });
            if custom_languages
                .insert(
                    UniCase::new(language_config.name.clone()),
                    language_config.clone(),
                )
                .is_some()
            {
                api_bail!(
                    "duplicate language name / alias: `{}`",
                    language_config.name
                );
            }
            for alias in &language_config.aliases {
                if custom_languages
                    .insert(UniCase::new(alias.clone()), language_config.clone())
                    .is_some()
                {
                    api_bail!("duplicate language name / alias: `{}`", alias);
                }
            }
        }
        Ok(Self {
            args,
            custom_languages,
        })
    }
}

// Back-fills `Position.output` (char offset, 1-based line/column) for every
// byte-offset position in a single pass over `text`.
fn set_output_positions<'a>(text: &str, positions: impl Iterator<Item = &'a mut Position>) {
    let mut positions = positions.collect::<Vec<_>>();
    positions.sort_by_key(|o| o.byte_offset);

    let mut positions_iter = positions.iter_mut();
    let Some(mut next_position) =
positions_iter.next() else {
        return;
    };

    let mut char_offset = 0;
    let mut line = 1;
    let mut column = 1;
    for (byte_offset, ch) in text.char_indices() {
        // Several positions may share the same byte offset (e.g. one chunk's
        // end equals the next chunk's start): resolve them all here.
        while next_position.byte_offset == byte_offset {
            next_position.output = Some(OutputPosition {
                char_offset,
                line,
                column,
            });
            if let Some(position) = positions_iter.next() {
                next_position = position;
            } else {
                return;
            }
        }
        char_offset += 1;
        if ch == '\n' {
            line += 1;
            column = 1;
        } else {
            column += 1;
        }
    }

    // Offsets after the last char.
    // NOTE(review): any offset that is not a char boundary also falls through
    // to here and is assigned the end-of-text position — assumes callers only
    // pass boundary offsets; confirm.
    loop {
        next_position.output = Some(OutputPosition {
            char_offset,
            line,
            column,
        });
        if let Some(position) = positions_iter.next() {
            next_position = position;
        } else {
            return;
        }
    }
}

#[async_trait]
impl SimpleFunctionExecutor for Executor {
    async fn evaluate(&self, input: Vec<Value>) -> Result<Value> {
        let full_text = self.args.text.value(&input)?.as_str()?;
        let chunk_size = self.args.chunk_size.value(&input)?.as_int64()?;
        let recursive_chunker = RecursiveChunker {
            full_text,
            chunk_size: chunk_size as usize,
            // Optional args fall back to 0 overlap and half the chunk size.
            chunk_overlap: (self.args.chunk_overlap.value(&input)?)
                .optional()
                .map(|v| v.as_int64())
                .transpose()?
                .unwrap_or(0) as usize,
            min_chunk_size: (self.args.min_chunk_size.value(&input)?)
                .optional()
                .map(|v| v.as_int64())
                .transpose()?
                .unwrap_or(chunk_size / 2) as usize,
        };

        let language = UniCase::new(
            (if let Some(language) = self.args.language.value(&input)?.optional() {
                language.as_str()?
            } else {
                ""
            })
            .to_string(),
        );
        // Language resolution order: user-defined custom languages first, then
        // built-in tree-sitter grammars, then the default regexp splitter.
        let mut output = if let Some(lang_config) = self.custom_languages.get(&language) {
            recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {
                lang_config,
                next_regexp_sep_id: 0,
            })?
        } else if let Some(lang_config) = TREE_SITTER_LANGUAGE_BY_LANG.get(&language) {
            let mut parser = tree_sitter::Parser::new();
            parser.set_language(&lang_config.tree_sitter_lang)?;
            let tree = parser.parse(full_text.as_ref(), None).ok_or_else(||
{
                anyhow!("failed in parsing text in language: {}", lang_config.name)
            })?;
            recursive_chunker.split_root_chunk(ChunkKind::TreeSitterNode {
                lang_config,
                node: tree.root_node(),
            })?
        } else {
            recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {
                lang_config: &DEFAULT_LANGUAGE_CONFIG,
                next_regexp_sep_id: 0,
            })?
        };

        // Translate every chunk's byte offsets into char/line/column in one
        // pass over the text.
        set_output_positions(
            full_text,
            output.iter_mut().flat_map(|chunk_output| {
                std::iter::once(&mut chunk_output.start_pos)
                    .chain(std::iter::once(&mut chunk_output.end_pos))
            }),
        );

        // Build the KTable: key = char-offset range, value = (text, start, end).
        let table = output
            .into_iter()
            .map(|chunk_output| {
                let output_start = chunk_output.start_pos.output.unwrap();
                let output_end = chunk_output.end_pos.output.unwrap();
                (
                    RangeValue::new(output_start.char_offset, output_end.char_offset).into(),
                    fields_value!(
                        Arc::<str>::from(chunk_output.text),
                        output_start.into_output(),
                        output_end.into_output()
                    )
                    .into(),
                )
            })
            .collect();

        Ok(Value::KTable(table))
    }
}

struct Factory;

#[async_trait]
impl SimpleFunctionFactoryBase for Factory {
    type Spec = Spec;
    type ResolvedArgs = Args;

    fn name(&self) -> &str {
        "SplitRecursively"
    }

    // Resolves the function's argument bindings and produces the output schema.
    async fn resolve_schema<'a>(
        &'a self,
        _spec: &'a Spec,
        args_resolver: &mut OpArgsResolver<'a>,
        _context: &FlowInstanceContext,
    ) -> Result<(Args, EnrichedValueType)> {
        let args = Args {
            text: args_resolver
                .next_arg("text")?
                .expect_type(&ValueType::Basic(BasicValueType::Str))?,
            chunk_size: args_resolver
                .next_arg("chunk_size")?
                .expect_type(&ValueType::Basic(BasicValueType::Int64))?,
            min_chunk_size: args_resolver
                .next_optional_arg("min_chunk_size")?
                .expect_type(&ValueType::Basic(BasicValueType::Int64))?,
            chunk_overlap: args_resolver
                .next_optional_arg("chunk_overlap")?
                .expect_type(&ValueType::Basic(BasicValueType::Int64))?,
            language: args_resolver
                .next_optional_arg("language")?
                .expect_type(&ValueType::Basic(BasicValueType::Str))?,
        };

        let pos_struct
= schema::ValueType::Struct(schema::StructSchema {
            // Schema of the `start`/`end` fields: char offset plus line/column.
            fields: Arc::new(vec![
                schema::FieldSchema::new("offset", make_output_type(BasicValueType::Int64)),
                schema::FieldSchema::new("line", make_output_type(BasicValueType::Int64)),
                schema::FieldSchema::new("column", make_output_type(BasicValueType::Int64)),
            ]),
            description: None,
        });

        let mut struct_schema = StructSchema::default();
        let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);
        schema_builder.add_field(FieldSchema::new(
            "location",
            make_output_type(BasicValueType::Range),
        ));
        schema_builder.add_field(FieldSchema::new(
            "text",
            make_output_type(BasicValueType::Str),
        ));
        schema_builder.add_field(FieldSchema::new(
            "start",
            schema::EnrichedValueType {
                typ: pos_struct.clone(),
                nullable: false,
                attrs: Default::default(),
            },
        ));
        schema_builder.add_field(FieldSchema::new(
            "end",
            schema::EnrichedValueType {
                typ: pos_struct,
                nullable: false,
                attrs: Default::default(),
            },
        ));
        // Output is a KTable; the source-text expression is attached as an
        // attribute so downstream ops can relate chunks back to their base text.
        let output_schema = make_output_type(TableSchema::new(TableKind::KTable, struct_schema))
            .with_attr(
                field_attrs::CHUNK_BASE_TEXT,
                serde_json::to_value(args_resolver.get_analyze_value(&args.text))?,
            );
        Ok((args, output_schema))
    }

    async fn build_executor(
        self: Arc<Self>,
        spec: Spec,
        args: Args,
        _context: Arc<FlowInstanceContext>,
    ) -> Result<Box<dyn SimpleFunctionExecutor>> {
        Ok(Box::new(Executor::new(args, spec)?))
    }
}

/// Registers the `SplitRecursively` function factory into the registry.
pub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {
    Factory.register(registry)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};

    // Helper function to assert chunk text and its consistency with the range within the original text.
    fn assert_chunk_text_consistency(
        full_text: &str, // Added full text
        actual_chunk: &ChunkOutput<'_>,
        expected_text: &str,
        context: &str,
    ) {
        // Extract text using the chunk's range from the original full text.
        let
extracted_text = full_text
            .get(actual_chunk.start_pos.byte_offset..actual_chunk.end_pos.byte_offset)
            .unwrap();
        // Assert that the expected text matches the text provided in the chunk.
        assert_eq!(
            actual_chunk.text, expected_text,
            "Provided chunk text mismatch - {context}"
        );
        // Assert that the expected text also matches the text extracted using the chunk's range.
        assert_eq!(
            extracted_text, expected_text,
            "Range inconsistency: extracted text mismatch - {context}"
        );
    }

    // Creates a default RecursiveChunker for testing, assuming no language-specific parsing.
    fn create_test_chunker(
        text: &str,
        chunk_size: usize,
        min_chunk_size: usize,
        chunk_overlap: usize,
    ) -> RecursiveChunker {
        RecursiveChunker {
            full_text: text,
            chunk_size,
            chunk_overlap,
            min_chunk_size,
        }
    }

    // End-to-end test through the flow-function harness: splits three short
    // lines with no overlap and checks the resulting keyed table.
    #[tokio::test]
    async fn test_split_recursively() {
        let spec = Spec {
            custom_languages: vec![],
        };
        let factory = Arc::new(Factory);
        let text_content = "Linea 1.\nLinea 2.\n\nLinea 3.";

        let input_args_values = vec![
            text_content.to_string().into(),
            (15i64).into(),
            (5i64).into(),
            (0i64).into(),
            Value::Null,
        ];

        let input_arg_schemas = vec![
            build_arg_schema("text", BasicValueType::Str),
            build_arg_schema("chunk_size", BasicValueType::Int64),
            build_arg_schema("min_chunk_size", BasicValueType::Int64),
            build_arg_schema("chunk_overlap", BasicValueType::Int64),
            build_arg_schema("language", BasicValueType::Str),
        ];

        let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;

        assert!(
            result.is_ok(),
            "test_flow_function failed: {:?}",
            result.err()
        );
        let value = result.unwrap();

        match value {
            Value::KTable(table) => {
                // Expected keys are char-offset ranges within the input text.
                let expected_chunks = vec![
                    (RangeValue::new(0, 8), "Linea 1."),
                    (RangeValue::new(9, 17), "Linea 2."),
                    (RangeValue::new(19, 27), "Linea 3."),
                ];

                for (range, expected_text) in expected_chunks {
                    let key:
KeyValue = range.into();\n match table.get(&key) {\n Some(scope_value_ref) => {\n let chunk_text =\n scope_value_ref.0.fields[0].as_str().unwrap_or_else(|_| {\n panic!(\"Chunk text not a string for key {key:?}\")\n });\n assert_eq!(**chunk_text, *expected_text);\n }\n None => panic!(\"Expected row value for key {key:?}, not found\"),\n }\n }\n }\n other => panic!(\"Expected Value::KTable, got {other:?}\"),\n }\n }\n\n #[test]\n fn test_translate_bytes_to_chars_simple() {\n let text = \"abc😄def\";\n let mut start1 = Position::new(0);\n let mut end1 = Position::new(3);\n let mut start2 = Position::new(3);\n let mut end2 = Position::new(7);\n let mut start3 = Position::new(7);\n let mut end3 = Position::new(10);\n let mut end_full = Position::new(text.len());\n\n let offsets = vec![\n &mut start1,\n &mut end1,\n &mut start2,\n &mut end2,\n &mut start3,\n &mut end3,\n &mut end_full,\n ];\n\n set_output_positions(text, offsets.into_iter());\n\n assert_eq!(\n start1.output,\n Some(OutputPosition {\n char_offset: 0,\n line: 1,\n column: 1,\n })\n );\n assert_eq!(\n end1.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n start2.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n end2.output,\n Some(OutputPosition {\n char_offset: 4,\n line: 1,\n column: 5,\n })\n );\n assert_eq!(\n end3.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n assert_eq!(\n end_full.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n }\n\n #[test]\n fn test_basic_split_no_overlap() {\n let text = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n let chunker = create_test_chunker(text, 15, 5, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n 
assert_chunk_text_consistency(text, &chunks[0], \"Linea 1.\", \"Test 1, Chunk 0\");\n assert_chunk_text_consistency(text, &chunks[1], \"Linea 2.\", \"Test 1, Chunk 1\");\n assert_chunk_text_consistency(text, &chunks[2], \"Linea 3.\", \"Test 1, Chunk 2\");\n\n // Test splitting when chunk_size forces breaks within segments.\n let text2 = \"A very very long text that needs to be split.\";\n let chunker2 = create_test_chunker(text2, 20, 12, 0);\n let result2 = chunker2.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result2.is_ok());\n let chunks2 = result2.unwrap();\n\n // Expect multiple chunks, likely split by spaces due to chunk_size.\n assert!(chunks2.len() > 1);\n assert_chunk_text_consistency(text2, &chunks2[0], \"A very very long\", \"Test 2, Chunk 0\");\n assert!(chunks2[0].text.len() <= 20);\n }\n\n #[test]\n fn test_basic_split_with_overlap() {\n let text = \"This is a test text that is a bit longer to see how the overlap works.\";\n let chunker = create_test_chunker(text, 20, 10, 5);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert!(chunks.len() > 1);\n\n if chunks.len() >= 2 {\n assert!(chunks[0].text.len() <= 25);\n }\n }\n\n #[test]\n fn test_split_trims_whitespace() {\n let text = \" \\n First chunk. \\n\\n Second chunk with spaces at the end. 
\\n\";\n let chunker = create_test_chunker(text, 30, 10, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n\n assert_chunk_text_consistency(\n text,\n &chunks[0],\n \" First chunk.\",\n \"Whitespace Test, Chunk 0\",\n );\n assert_chunk_text_consistency(\n text,\n &chunks[1],\n \" Second chunk with spaces\",\n \"Whitespace Test, Chunk 1\",\n );\n assert_chunk_text_consistency(text, &chunks[2], \"at the end.\", \"Whitespace Test, Chunk 2\");\n }\n}\n"], ["/cocoindex/src/builder/flow_builder.rs", "use crate::{prelude::*, py::Pythonized};\n\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::{collections::btree_map, ops::Deref};\nuse tokio::task::LocalSet;\n\nuse super::analyzer::{\n AnalyzerContext, CollectorBuilder, DataScopeBuilder, OpScope, build_flow_instance_context,\n};\nuse crate::{\n base::{\n schema::{CollectorSchema, FieldSchema},\n spec::{FieldName, NamedSpec},\n },\n lib_context::LibContext,\n ops::interface::FlowInstanceContext,\n py::IntoPyResult,\n};\nuse crate::{lib_context::FlowContext, py};\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct OpScopeRef(Arc);\n\nimpl From> for OpScopeRef {\n fn from(scope: Arc) -> Self {\n Self(scope)\n }\n}\n\nimpl Deref for OpScopeRef {\n type Target = Arc;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl std::fmt::Display for OpScopeRef {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}\n\n#[pymethods]\nimpl OpScopeRef {\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn add_collector(&mut self, name: String) -> PyResult {\n let collector = DataCollector {\n name,\n scope: self.0.clone(),\n collector: Mutex::new(None),\n 
};\n Ok(collector)\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataType {\n schema: schema::EnrichedValueType,\n}\n\nimpl From for DataType {\n fn from(schema: schema::EnrichedValueType) -> Self {\n Self { schema }\n }\n}\n\n#[pymethods]\nimpl DataType {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.schema)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn schema(&self) -> Pythonized {\n Pythonized(self.schema.clone())\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataSlice {\n scope: Arc,\n value: Arc,\n data_type: DataType,\n}\n\n#[pymethods]\nimpl DataSlice {\n pub fn data_type(&self) -> DataType {\n self.data_type.clone()\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn field(&self, field_name: &str) -> PyResult> {\n let field_schema = match &self.data_type.schema.typ {\n schema::ValueType::Struct(struct_type) => {\n match struct_type.fields.iter().find(|f| f.name == field_name) {\n Some(field) => field,\n None => return Ok(None),\n }\n }\n _ => return Err(PyException::new_err(\"expect struct type\")),\n };\n let value_mapping = match self.value.as_ref() {\n spec::ValueMapping::Field(spec::FieldMapping {\n scope,\n field_path: spec::FieldPath(field_path),\n }) => spec::ValueMapping::Field(spec::FieldMapping {\n scope: scope.clone(),\n field_path: spec::FieldPath(\n field_path\n .iter()\n .cloned()\n .chain([field_name.to_string()])\n .collect(),\n ),\n }),\n\n spec::ValueMapping::Struct(v) => v\n .fields\n .iter()\n .find(|f| f.name == field_name)\n .map(|f| f.spec.clone())\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?,\n\n spec::ValueMapping::Constant { .. 
} => {\n return Err(PyException::new_err(\n \"field access not supported for literal\",\n ));\n }\n };\n Ok(Some(DataSlice {\n scope: self.scope.clone(),\n value: Arc::new(value_mapping),\n data_type: field_schema.value_type.clone().into(),\n }))\n }\n}\n\nimpl DataSlice {\n fn extract_value_mapping(&self) -> spec::ValueMapping {\n match self.value.as_ref() {\n spec::ValueMapping::Field(v) => spec::ValueMapping::Field(spec::FieldMapping {\n field_path: v.field_path.clone(),\n scope: v.scope.clone().or_else(|| Some(self.scope.name.clone())),\n }),\n v => v.clone(),\n }\n }\n}\n\nimpl std::fmt::Display for DataSlice {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(\n f,\n \"DataSlice({}; {} {}) \",\n self.data_type.schema, self.scope, self.value\n )?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct DataCollector {\n name: String,\n scope: Arc,\n collector: Mutex>,\n}\n\n#[pymethods]\nimpl DataCollector {\n fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for DataCollector {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let collector = self.collector.lock().unwrap();\n write!(f, \"DataCollector \\\"{}\\\" ({}\", self.name, self.scope)?;\n if let Some(collector) = collector.as_ref() {\n write!(f, \": {}\", collector.schema)?;\n if collector.is_used {\n write!(f, \" (used)\")?;\n }\n }\n write!(f, \")\")?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct FlowBuilder {\n lib_context: Arc,\n flow_inst_context: Arc,\n\n root_op_scope: Arc,\n flow_instance_name: String,\n reactive_ops: Vec>,\n\n direct_input_fields: Vec,\n direct_output_value: Option,\n\n import_ops: Vec>,\n export_ops: Vec>,\n\n declarations: Vec,\n\n next_generated_op_id: usize,\n}\n\n#[pymethods]\nimpl FlowBuilder {\n #[new]\n pub fn new(name: &str) -> PyResult {\n let lib_context = get_lib_context().into_py_result()?;\n let root_op_scope = OpScope::new(\n 
spec::ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(DataScopeBuilder::new())),\n );\n let flow_inst_context = build_flow_instance_context(name, None);\n let result = Self {\n lib_context,\n flow_inst_context,\n root_op_scope,\n flow_instance_name: name.to_string(),\n\n reactive_ops: vec![],\n\n import_ops: vec![],\n export_ops: vec![],\n\n direct_input_fields: vec![],\n direct_output_value: None,\n\n declarations: vec![],\n\n next_generated_op_id: 0,\n };\n Ok(result)\n }\n\n pub fn root_scope(&self) -> OpScopeRef {\n OpScopeRef(self.root_op_scope.clone())\n }\n\n #[pyo3(signature = (kind, op_spec, target_scope, name, refresh_options=None, execution_options=None))]\n #[allow(clippy::too_many_arguments)]\n pub fn add_source(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n target_scope: Option,\n name: String,\n refresh_options: Option>,\n execution_options: Option>,\n ) -> PyResult {\n if let Some(target_scope) = target_scope {\n if *target_scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"source can only be added to the root scope\",\n ));\n }\n }\n let import_op = spec::NamedSpec {\n name,\n spec: spec::ImportOpSpec {\n source: spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n },\n refresh_options: refresh_options.map(|o| o.into_inner()).unwrap_or_default(),\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n },\n };\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(\n analyzer_ctx.analyze_import_op(&self.root_op_scope, import_op.clone()),\n )\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.import_ops.push(import_op);\n Ok(result)\n }\n\n pub fn constant(\n &self,\n value_type: py::Pythonized,\n value: Bound<'_, 
PyAny>,\n ) -> PyResult {\n let schema = value_type.into_inner();\n let value = py::value_from_py_object(&schema.typ, &value)?;\n let slice = DataSlice {\n scope: self.root_op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Constant(spec::ConstantMapping {\n schema: schema.clone(),\n value: serde_json::to_value(value).into_py_result()?,\n })),\n data_type: schema.into(),\n };\n Ok(slice)\n }\n\n pub fn add_direct_input(\n &mut self,\n name: String,\n value_type: py::Pythonized,\n ) -> PyResult {\n let value_type = value_type.into_inner();\n {\n let mut root_data_scope = self.root_op_scope.data.lock().unwrap();\n root_data_scope\n .add_field(name.clone(), &value_type)\n .into_py_result()?;\n }\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.direct_input_fields\n .push(FieldSchema { name, value_type });\n Ok(result)\n }\n\n pub fn set_direct_output(&mut self, data_slice: DataSlice) -> PyResult<()> {\n if data_slice.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"direct output must be value in the root scope\",\n ));\n }\n self.direct_output_value = Some(data_slice.extract_value_mapping());\n Ok(())\n }\n\n #[pyo3(signature = (data_slice, execution_options=None))]\n pub fn for_each(\n &mut self,\n data_slice: DataSlice,\n execution_options: Option>,\n ) -> PyResult {\n let parent_scope = &data_slice.scope;\n let field_path = match data_slice.value.as_ref() {\n spec::ValueMapping::Field(v) => &v.field_path,\n _ => return Err(PyException::new_err(\"expect field path\")),\n };\n let num_parent_layers = parent_scope.ancestors().count();\n let scope_name = format!(\n \"{}_{}\",\n field_path.last().map_or(\"\", |s| s.as_str()),\n num_parent_layers\n );\n let (_, child_op_scope) = parent_scope\n .new_foreach_op_scope(scope_name.clone(), field_path)\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name: format!(\".for_each.{}\", self.next_generated_op_id),\n spec: 
spec::ReactiveOpSpec::ForEach(spec::ForEachOpSpec {\n field_path: field_path.clone(),\n op_scope: spec::ReactiveOpScope {\n name: scope_name,\n ops: vec![],\n },\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n }),\n };\n self.next_generated_op_id += 1;\n self.get_mut_reactive_ops(parent_scope)\n .into_py_result()?\n .push(reactive_op);\n\n Ok(OpScopeRef(child_op_scope))\n }\n\n #[pyo3(signature = (kind, op_spec, args, target_scope, name))]\n pub fn transform(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n args: Vec<(DataSlice, Option)>,\n target_scope: Option,\n name: String,\n ) -> PyResult {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n let op_scope = Self::minimum_common_scope(\n args.iter().map(|(ds, _)| &ds.scope),\n target_scope.as_ref().map(|s| &s.0),\n )\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Transform(spec::TransformOpSpec {\n inputs: args\n .iter()\n .map(|(ds, arg_name)| spec::OpArgBinding {\n arg_name: spec::OpArgName(arg_name.clone()),\n value: ds.extract_value_mapping(),\n })\n .collect(),\n op: spec,\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(op_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(op_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let result = Self::last_field_to_data_slice(op_scope).into_py_result()?;\n Ok(result)\n }\n\n #[pyo3(signature = (collector, fields, auto_uuid_field=None))]\n pub fn collect(\n &mut self,\n py: Python<'_>,\n collector: &DataCollector,\n fields: Vec<(FieldName, DataSlice)>,\n auto_uuid_field: Option,\n ) -> PyResult<()> {\n let common_scope = Self::minimum_common_scope(fields.iter().map(|(_, ds)| 
&ds.scope), None)\n .into_py_result()?;\n let name = format!(\".collect.{}\", self.next_generated_op_id);\n self.next_generated_op_id += 1;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Collect(spec::CollectOpSpec {\n input: spec::StructMapping {\n fields: fields\n .iter()\n .map(|(name, ds)| NamedSpec {\n name: name.clone(),\n spec: ds.extract_value_mapping(),\n })\n .collect(),\n },\n scope_name: collector.scope.name.clone(),\n collector_name: collector.name.clone(),\n auto_uuid_field: auto_uuid_field.clone(),\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(common_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(common_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let collector_schema = CollectorSchema::from_fields(\n fields\n .into_iter()\n .map(|(name, ds)| FieldSchema {\n name,\n value_type: ds.data_type.schema,\n })\n .collect(),\n auto_uuid_field,\n );\n {\n let mut collector = collector.collector.lock().unwrap();\n if let Some(collector) = collector.as_mut() {\n collector.merge_schema(&collector_schema).into_py_result()?;\n } else {\n *collector = Some(CollectorBuilder::new(Arc::new(collector_schema)));\n }\n }\n\n Ok(())\n }\n\n #[pyo3(signature = (name, kind, op_spec, index_options, input, setup_by_user=false))]\n pub fn export(\n &mut self,\n name: String,\n kind: String,\n op_spec: py::Pythonized>,\n index_options: py::Pythonized,\n input: &DataCollector,\n setup_by_user: bool,\n ) -> PyResult<()> {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n\n if input.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"Export can only work on collectors belonging to the root scope.\",\n ));\n }\n self.export_ops.push(spec::NamedSpec {\n 
name,\n spec: spec::ExportOpSpec {\n collector_name: input.name.clone(),\n target: spec,\n index_options: index_options.into_inner(),\n setup_by_user,\n },\n });\n Ok(())\n }\n\n pub fn declare(&mut self, op_spec: py::Pythonized) -> PyResult<()> {\n self.declarations.push(op_spec.into_inner());\n Ok(())\n }\n\n pub fn scope_field(&self, scope: OpScopeRef, field_name: &str) -> PyResult> {\n let field_type = {\n let scope_builder = scope.0.data.lock().unwrap();\n let (_, field_schema) = scope_builder\n .data\n .find_field(field_name)\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?;\n schema::EnrichedValueType::from_alternative(&field_schema.value_type)\n .into_py_result()?\n };\n Ok(Some(DataSlice {\n scope: scope.0,\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![field_name.to_string()]),\n })),\n data_type: DataType { schema: field_type },\n }))\n }\n\n pub fn build_flow(&self, py: Python<'_>, py_event_loop: Py) -> PyResult {\n let spec = spec::FlowInstanceSpec {\n name: self.flow_instance_name.clone(),\n import_ops: self.import_ops.clone(),\n reactive_ops: self.reactive_ops.clone(),\n export_ops: self.export_ops.clone(),\n declarations: self.declarations.clone(),\n };\n let flow_instance_ctx = build_flow_instance_context(\n &self.flow_instance_name,\n Some(crate::py::PythonExecutionContext::new(py, py_event_loop)),\n );\n let flow_ctx = py\n .allow_threads(|| {\n get_runtime().block_on(async move {\n let analyzed_flow =\n super::AnalyzedFlow::from_flow_instance(spec, flow_instance_ctx).await?;\n let persistence_ctx = self.lib_context.require_persistence_ctx()?;\n let execution_ctx = {\n let flow_setup_ctx = persistence_ctx.setup_ctx.read().await;\n FlowContext::new(\n Arc::new(analyzed_flow),\n flow_setup_ctx\n .all_setup_states\n .flows\n .get(&self.flow_instance_name),\n )\n .await?\n };\n anyhow::Ok(execution_ctx)\n })\n })\n .into_py_result()?;\n let mut 
flow_ctxs = self.lib_context.flows.lock().unwrap();\n let flow_ctx = match flow_ctxs.entry(self.flow_instance_name.clone()) {\n btree_map::Entry::Occupied(_) => {\n return Err(PyException::new_err(format!(\n \"flow instance name already exists: {}\",\n self.flow_instance_name\n )));\n }\n btree_map::Entry::Vacant(entry) => {\n let flow_ctx = Arc::new(flow_ctx);\n entry.insert(flow_ctx.clone());\n flow_ctx\n }\n };\n Ok(py::Flow(flow_ctx))\n }\n\n pub fn build_transient_flow_async<'py>(\n &self,\n py: Python<'py>,\n py_event_loop: Py,\n ) -> PyResult> {\n if self.direct_input_fields.is_empty() {\n return Err(PyException::new_err(\"expect at least one direct input\"));\n }\n let direct_output_value = if let Some(direct_output_value) = &self.direct_output_value {\n direct_output_value\n } else {\n return Err(PyException::new_err(\"expect direct output\"));\n };\n let spec = spec::TransientFlowSpec {\n name: self.flow_instance_name.clone(),\n input_fields: self.direct_input_fields.clone(),\n reactive_ops: self.reactive_ops.clone(),\n output_value: direct_output_value.clone(),\n };\n let py_ctx = crate::py::PythonExecutionContext::new(py, py_event_loop);\n\n let analyzed_flow = get_runtime().spawn_blocking(|| {\n let local_set = LocalSet::new();\n local_set.block_on(\n get_runtime(),\n super::AnalyzedTransientFlow::from_transient_flow(spec, Some(py_ctx)),\n )\n });\n future_into_py(py, async move {\n Ok(py::TransientFlow(Arc::new(\n analyzed_flow.await.into_py_result()?.into_py_result()?,\n )))\n })\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for FlowBuilder {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Flow instance name: {}\\n\\n\", self.flow_instance_name)?;\n for op in self.import_ops.iter() {\n write!(\n f,\n \"Source op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n 
}\n for field in self.direct_input_fields.iter() {\n writeln!(f, \"Direct input {}: {}\", field.name, field.value_type)?;\n }\n if !self.direct_input_fields.is_empty() {\n writeln!(f)?;\n }\n for op in self.reactive_ops.iter() {\n write!(\n f,\n \"Reactive op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for op in self.export_ops.iter() {\n write!(\n f,\n \"Export op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n if let Some(output) = &self.direct_output_value {\n write!(f, \"Direct output: {output}\\n\\n\")?;\n }\n Ok(())\n }\n}\n\nimpl FlowBuilder {\n fn last_field_to_data_slice(op_scope: &Arc) -> Result {\n let data_scope = op_scope.data.lock().unwrap();\n let last_field = data_scope.last_field().unwrap();\n let result = DataSlice {\n scope: op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![last_field.name.clone()]),\n })),\n data_type: schema::EnrichedValueType::from_alternative(&last_field.value_type)?.into(),\n };\n Ok(result)\n }\n\n fn minimum_common_scope<'a>(\n scopes: impl Iterator>,\n target_scope: Option<&'a Arc>,\n ) -> Result<&'a Arc> {\n let mut scope_iter = scopes;\n let mut common_scope = scope_iter\n .next()\n .ok_or_else(|| PyException::new_err(\"expect at least one input\"))?;\n for scope in scope_iter {\n if scope.is_op_scope_descendant(common_scope) {\n common_scope = scope;\n } else if !common_scope.is_op_scope_descendant(scope) {\n api_bail!(\n \"expect all arguments share the common scope, got {} and {} exclusive to each other\",\n common_scope,\n scope\n );\n }\n }\n if let Some(target_scope) = target_scope {\n if !target_scope.is_op_scope_descendant(common_scope) {\n api_bail!(\n \"the field can only be attached to a scope or sub-scope of the input value. 
Target scope: {}, input scope: {}\",\n target_scope,\n common_scope\n );\n }\n common_scope = target_scope;\n }\n Ok(common_scope)\n }\n\n fn get_mut_reactive_ops<'a>(\n &'a mut self,\n op_scope: &OpScope,\n ) -> Result<&'a mut Vec>> {\n Self::get_mut_reactive_ops_internal(op_scope, &mut self.reactive_ops)\n }\n\n fn get_mut_reactive_ops_internal<'a>(\n op_scope: &OpScope,\n root_reactive_ops: &'a mut Vec>,\n ) -> Result<&'a mut Vec>> {\n let result = match &op_scope.parent {\n None => root_reactive_ops,\n Some((parent_op_scope, field_path)) => {\n let parent_reactive_ops =\n Self::get_mut_reactive_ops_internal(parent_op_scope, root_reactive_ops)?;\n // Reuse the last foreach if matched, otherwise create a new one.\n match parent_reactive_ops.last() {\n Some(spec::NamedSpec {\n spec: spec::ReactiveOpSpec::ForEach(foreach_spec),\n ..\n }) if &foreach_spec.field_path == field_path\n && foreach_spec.op_scope.name == op_scope.name => {}\n\n _ => {\n api_bail!(\"already out of op scope `{}`\", op_scope.name);\n }\n }\n match &mut parent_reactive_ops.last_mut().unwrap().spec {\n spec::ReactiveOpSpec::ForEach(foreach_spec) => &mut foreach_spec.op_scope.ops,\n _ => unreachable!(),\n }\n }\n };\n Ok(result)\n }\n}\n"], ["/cocoindex/src/execution/live_updater.rs", "use crate::{execution::stats::UpdateStats, prelude::*};\n\nuse super::stats;\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse tokio::{sync::watch, task::JoinSet, time::MissedTickBehavior};\n\npub struct FlowLiveUpdaterUpdates {\n pub active_sources: Vec,\n pub updated_sources: Vec,\n}\nstruct FlowLiveUpdaterStatus {\n pub active_source_idx: BTreeSet,\n pub source_updates_num: Vec,\n}\n\nstruct UpdateReceiveState {\n status_rx: watch::Receiver,\n last_num_source_updates: Vec,\n is_done: bool,\n}\n\npub struct FlowLiveUpdater {\n flow_ctx: Arc,\n join_set: Mutex>>>,\n stats_per_task: Vec>,\n recv_state: tokio::sync::Mutex,\n num_remaining_tasks_rx: watch::Receiver,\n\n // Hold tx to avoid dropping the 
sender.\n _status_tx: watch::Sender,\n _num_remaining_tasks_tx: watch::Sender,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct FlowLiveUpdaterOptions {\n /// If true, the updater will keep refreshing the index.\n /// Otherwise, it will only apply changes from the source up to the current time.\n pub live_mode: bool,\n\n /// If true, stats will be printed to the console.\n pub print_stats: bool,\n}\n\nconst REPORT_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);\n\nstruct SharedAckFn Result<()>> {\n count: usize,\n ack_fn: Option,\n}\n\nimpl Result<()>> SharedAckFn {\n fn new(count: usize, ack_fn: AckAsyncFn) -> Self {\n Self {\n count,\n ack_fn: Some(ack_fn),\n }\n }\n\n async fn ack(v: &Mutex) -> Result<()> {\n let ack_fn = {\n let mut v = v.lock().unwrap();\n v.count -= 1;\n if v.count > 0 { None } else { v.ack_fn.take() }\n };\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n Ok(())\n }\n}\n\nstruct SourceUpdateTask {\n source_idx: usize,\n\n flow: Arc,\n plan: Arc,\n execution_ctx: Arc>,\n source_update_stats: Arc,\n pool: PgPool,\n options: FlowLiveUpdaterOptions,\n\n status_tx: watch::Sender,\n num_remaining_tasks_tx: watch::Sender,\n}\n\nimpl Drop for SourceUpdateTask {\n fn drop(&mut self) {\n self.status_tx.send_modify(|update| {\n update.active_source_idx.remove(&self.source_idx);\n });\n self.num_remaining_tasks_tx.send_modify(|update| {\n *update -= 1;\n });\n }\n}\n\nimpl SourceUpdateTask {\n async fn run(self) -> Result<()> {\n let source_idx = self.source_idx;\n let source_context = self\n .execution_ctx\n .get_source_indexing_context(&self.flow, source_idx, &self.pool)\n .await?;\n\n let import_op = &self.plan.import_ops[source_idx];\n\n let report_stats = |stats: &stats::UpdateStats, kind: &str| {\n self.source_update_stats.merge(stats);\n if self.options.print_stats {\n println!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n } else {\n trace!(\n \"{}.{} 
({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n }\n };\n\n let mut futs: Vec>> = Vec::new();\n\n // Deal with change streams.\n if self.options.live_mode {\n if let Some(change_stream) = import_op.executor.change_stream().await? {\n let change_stream_stats = Arc::new(stats::UpdateStats::default());\n futs.push(\n {\n let change_stream_stats = change_stream_stats.clone();\n let pool = self.pool.clone();\n let status_tx = self.status_tx.clone();\n async move {\n let mut change_stream = change_stream;\n let retry_options = retryable::RetryOptions {\n max_retries: None,\n initial_backoff: std::time::Duration::from_secs(5),\n max_backoff: std::time::Duration::from_secs(60),\n };\n loop {\n // Workaround as AsyncFnMut isn't mature yet.\n // Should be changed to use AsyncFnMut once it is.\n let change_stream = tokio::sync::Mutex::new(&mut change_stream);\n let change_msg = retryable::run(\n || async {\n let mut change_stream = change_stream.lock().await;\n change_stream\n .next()\n .await\n .transpose()\n .map_err(retryable::Error::always_retryable)\n },\n &retry_options,\n )\n .await?;\n let change_msg = if let Some(change_msg) = change_msg {\n change_msg\n } else {\n break;\n };\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n let ack_fn = {\n let status_tx = status_tx.clone();\n let update_stats = update_stats.clone();\n let change_stream_stats = change_stream_stats.clone();\n async move || {\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n change_stream_stats.merge(&update_stats);\n }\n if let Some(ack_fn) = change_msg.ack_fn {\n ack_fn().await\n } else {\n Ok(())\n }\n }\n };\n let shared_ack_fn = Arc::new(Mutex::new(SharedAckFn::new(\n change_msg.changes.iter().len(),\n ack_fn,\n )));\n for change in change_msg.changes {\n let shared_ack_fn = shared_ack_fn.clone();\n let concur_permit = import_op\n .concurrency_controller\n 
.acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n tokio::spawn(source_context.clone().process_source_key(\n change.key,\n change.data,\n update_stats.clone(),\n concur_permit,\n Some(move || async move {\n SharedAckFn::ack(&shared_ack_fn).await\n }),\n pool.clone(),\n ));\n }\n }\n Ok(())\n }\n }\n .boxed(),\n );\n\n futs.push(\n async move {\n let mut interval = tokio::time::interval(REPORT_INTERVAL);\n let mut last_change_stream_stats: UpdateStats =\n change_stream_stats.as_ref().clone();\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n let curr_change_stream_stats = change_stream_stats.as_ref().clone();\n let delta = curr_change_stream_stats.delta(&last_change_stream_stats);\n if delta.has_any_change() {\n report_stats(&delta, \"change stream\");\n last_change_stream_stats = curr_change_stream_stats;\n }\n }\n }\n .boxed(),\n );\n }\n }\n\n // The main update loop.\n futs.push({\n let status_tx = self.status_tx.clone();\n let pool = self.pool.clone();\n let live_mode = self.options.live_mode;\n async move {\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"batch update\");\n\n if let (true, Some(refresh_interval)) =\n (live_mode, import_op.refresh_options.refresh_interval)\n {\n let mut interval = tokio::time::interval(refresh_interval);\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"interval refresh\");\n }\n 
}\n Ok(())\n }\n .boxed()\n });\n\n let join_result = try_join_all(futs).await;\n if let Err(err) = join_result {\n error!(\"Error in source `{}`: {:?}\", import_op.name, err);\n return Err(err);\n }\n Ok(())\n }\n}\n\nimpl FlowLiveUpdater {\n pub async fn start(\n flow_ctx: Arc,\n pool: &PgPool,\n options: FlowLiveUpdaterOptions,\n ) -> Result {\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let execution_ctx = Arc::new(flow_ctx.use_owned_execution_ctx().await?);\n\n let (status_tx, status_rx) = watch::channel(FlowLiveUpdaterStatus {\n active_source_idx: BTreeSet::from_iter(0..plan.import_ops.len()),\n source_updates_num: vec![0; plan.import_ops.len()],\n });\n\n let (num_remaining_tasks_tx, num_remaining_tasks_rx) =\n watch::channel(plan.import_ops.len());\n\n let mut join_set = JoinSet::new();\n let mut stats_per_task = Vec::new();\n\n for source_idx in 0..plan.import_ops.len() {\n let source_update_stats = Arc::new(stats::UpdateStats::default());\n let source_update_task = SourceUpdateTask {\n source_idx,\n flow: flow_ctx.flow.clone(),\n plan: plan.clone(),\n execution_ctx: execution_ctx.clone(),\n source_update_stats: source_update_stats.clone(),\n pool: pool.clone(),\n options: options.clone(),\n status_tx: status_tx.clone(),\n num_remaining_tasks_tx: num_remaining_tasks_tx.clone(),\n };\n join_set.spawn(source_update_task.run());\n stats_per_task.push(source_update_stats);\n }\n Ok(Self {\n flow_ctx,\n join_set: Mutex::new(Some(join_set)),\n stats_per_task,\n recv_state: tokio::sync::Mutex::new(UpdateReceiveState {\n status_rx,\n last_num_source_updates: vec![0; plan.import_ops.len()],\n is_done: false,\n }),\n num_remaining_tasks_rx,\n\n _status_tx: status_tx,\n _num_remaining_tasks_tx: num_remaining_tasks_tx,\n })\n }\n\n pub async fn wait(&self) -> Result<()> {\n {\n let mut rx = self.num_remaining_tasks_rx.clone();\n rx.wait_for(|v| *v == 0).await?;\n }\n\n let Some(mut join_set) = self.join_set.lock().unwrap().take() else {\n return Ok(());\n 
};\n while let Some(task_result) = join_set.join_next().await {\n match task_result {\n Ok(Ok(_)) => {}\n Ok(Err(err)) => {\n return Err(err);\n }\n Err(err) if err.is_cancelled() => {}\n Err(err) => {\n return Err(err.into());\n }\n }\n }\n Ok(())\n }\n\n pub fn abort(&self) {\n let mut join_set = self.join_set.lock().unwrap();\n if let Some(join_set) = &mut *join_set {\n join_set.abort_all();\n }\n }\n\n pub fn index_update_info(&self) -> stats::IndexUpdateInfo {\n stats::IndexUpdateInfo {\n sources: std::iter::zip(\n self.flow_ctx.flow.flow_instance.import_ops.iter(),\n self.stats_per_task.iter(),\n )\n .map(|(import_op, stats)| stats::SourceUpdateInfo {\n source_name: import_op.name.clone(),\n stats: stats.as_ref().clone(),\n })\n .collect(),\n }\n }\n\n pub async fn next_status_updates(&self) -> Result {\n let mut recv_state = self.recv_state.lock().await;\n let recv_state = &mut *recv_state;\n\n if recv_state.is_done {\n return Ok(FlowLiveUpdaterUpdates {\n active_sources: vec![],\n updated_sources: vec![],\n });\n }\n\n recv_state.status_rx.changed().await?;\n let status = recv_state.status_rx.borrow_and_update();\n let updates = FlowLiveUpdaterUpdates {\n active_sources: status\n .active_source_idx\n .iter()\n .map(|idx| {\n self.flow_ctx.flow.flow_instance.import_ops[*idx]\n .name\n .clone()\n })\n .collect(),\n updated_sources: status\n .source_updates_num\n .iter()\n .enumerate()\n .filter_map(|(idx, num_updates)| {\n if num_updates > &recv_state.last_num_source_updates[idx] {\n Some(\n self.flow_ctx.flow.flow_instance.import_ops[idx]\n .name\n .clone(),\n )\n } else {\n None\n }\n })\n .collect(),\n };\n recv_state.last_num_source_updates = status.source_updates_num.clone();\n if status.active_source_idx.is_empty() {\n recv_state.is_done = true;\n }\n Ok(updates)\n }\n}\n"], ["/cocoindex/src/execution/dumper.rs", "use crate::prelude::*;\n\nuse futures::{StreamExt, future::try_join_all};\nuse itertools::Itertools;\nuse serde::ser::SerializeSeq;\nuse 
sqlx::PgPool;\nuse std::path::{Path, PathBuf};\nuse yaml_rust2::YamlEmitter;\n\nuse super::evaluator::SourceRowEvaluationContext;\nuse super::memoization::EvaluationMemoryOptions;\nuse super::row_indexer;\nuse crate::base::{schema, value};\nuse crate::builder::plan::{AnalyzedImportOp, ExecutionPlan};\nuse crate::ops::interface::SourceExecutorListOptions;\nuse crate::utils::yaml_ser::YamlSerializer;\n\n#[derive(Debug, Clone, Deserialize)]\npub struct EvaluateAndDumpOptions {\n pub output_dir: String,\n pub use_cache: bool,\n}\n\nconst FILENAME_PREFIX_MAX_LENGTH: usize = 128;\n\nstruct TargetExportData<'a> {\n schema: &'a Vec,\n // The purpose is to make rows sorted by primary key.\n data: BTreeMap,\n}\n\nimpl Serialize for TargetExportData<'_> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(Some(self.data.len()))?;\n for (_, values) in self.data.iter() {\n seq.serialize_element(&value::TypedFieldsValue {\n schema: self.schema,\n values_iter: values.fields.iter(),\n })?;\n }\n seq.end()\n }\n}\n\n#[derive(Serialize)]\nstruct SourceOutputData<'a> {\n key: value::TypedValue<'a>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n exports: Option>>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n error: Option,\n}\n\nstruct Dumper<'a> {\n plan: &'a ExecutionPlan,\n setup_execution_ctx: &'a exec_ctx::FlowSetupExecutionContext,\n schema: &'a schema::FlowSchema,\n pool: &'a PgPool,\n options: EvaluateAndDumpOptions,\n}\n\nimpl<'a> Dumper<'a> {\n async fn evaluate_source_entry<'b>(\n &'a self,\n import_op_idx: usize,\n import_op: &'a AnalyzedImportOp,\n key: &value::KeyValue,\n collected_values_buffer: &'b mut Vec>,\n ) -> Result>>>\n where\n 'a: 'b,\n {\n let data_builder = row_indexer::evaluate_source_entry_with_memory(\n &SourceRowEvaluationContext {\n plan: self.plan,\n import_op,\n schema: self.schema,\n key,\n import_op_idx,\n },\n self.setup_execution_ctx,\n 
EvaluationMemoryOptions {\n enable_cache: self.options.use_cache,\n evaluation_only: true,\n },\n self.pool,\n )\n .await?;\n\n let data_builder = if let Some(data_builder) = data_builder {\n data_builder\n } else {\n return Ok(None);\n };\n\n *collected_values_buffer = data_builder.collected_values;\n let exports = self\n .plan\n .export_ops\n .iter()\n .map(|export_op| -> Result<_> {\n let collector_idx = export_op.input.collector_idx as usize;\n let entry = (\n export_op.name.as_str(),\n TargetExportData {\n schema: &self.schema.root_op_scope.collectors[collector_idx]\n .spec\n .fields,\n data: collected_values_buffer[collector_idx]\n .iter()\n .map(|v| -> Result<_> {\n let key = row_indexer::extract_primary_key(\n &export_op.primary_key_def,\n v,\n )?;\n Ok((key, v))\n })\n .collect::>()?,\n },\n );\n Ok(entry)\n })\n .collect::>()?;\n Ok(Some(exports))\n }\n\n async fn evaluate_and_dump_source_entry(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n key: value::KeyValue,\n file_path: PathBuf,\n ) -> Result<()> {\n let _permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n let mut collected_values_buffer = Vec::new();\n let (exports, error) = match self\n .evaluate_source_entry(import_op_idx, import_op, &key, &mut collected_values_buffer)\n .await\n {\n Ok(exports) => (exports, None),\n Err(e) => (None, Some(format!(\"{e:?}\"))),\n };\n let key_value = value::Value::from(key);\n let file_data = SourceOutputData {\n key: value::TypedValue {\n t: &import_op.primary_key_type,\n v: &key_value,\n },\n exports,\n error,\n };\n\n let yaml_output = {\n let mut yaml_output = String::new();\n let yaml_data = YamlSerializer::serialize(&file_data)?;\n let mut yaml_emitter = YamlEmitter::new(&mut yaml_output);\n yaml_emitter.multiline_strings(true);\n yaml_emitter.compact(true);\n yaml_emitter.dump(&yaml_data)?;\n yaml_output\n };\n tokio::fs::write(file_path, yaml_output).await?;\n\n Ok(())\n }\n\n async fn 
evaluate_and_dump_for_source(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n ) -> Result<()> {\n let mut keys_by_filename_prefix: IndexMap> = IndexMap::new();\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n while let Some(rows) = rows_stream.next().await {\n for row in rows?.into_iter() {\n let mut s = row\n .key\n .to_strs()\n .into_iter()\n .map(|s| urlencoding::encode(&s).into_owned())\n .join(\":\");\n s.truncate(\n (0..(FILENAME_PREFIX_MAX_LENGTH - import_op.name.as_str().len()))\n .rev()\n .find(|i| s.is_char_boundary(*i))\n .unwrap_or(0),\n );\n keys_by_filename_prefix.entry(s).or_default().push(row.key);\n }\n }\n let output_dir = Path::new(&self.options.output_dir);\n let evaluate_futs =\n keys_by_filename_prefix\n .into_iter()\n .flat_map(|(filename_prefix, keys)| {\n let num_keys = keys.len();\n keys.into_iter().enumerate().map(move |(i, key)| {\n let extra_id = if num_keys > 1 {\n Cow::Owned(format!(\".{i}\"))\n } else {\n Cow::Borrowed(\"\")\n };\n let file_name =\n format!(\"{}@{}{}.yaml\", import_op.name, filename_prefix, extra_id);\n let file_path = output_dir.join(Path::new(&file_name));\n self.evaluate_and_dump_source_entry(\n import_op_idx,\n import_op,\n key,\n file_path,\n )\n })\n });\n try_join_all(evaluate_futs).await?;\n Ok(())\n }\n\n async fn evaluate_and_dump(&self) -> Result<()> {\n try_join_all(\n self.plan\n .import_ops\n .iter()\n .enumerate()\n .map(|(idx, import_op)| self.evaluate_and_dump_for_source(idx, import_op)),\n )\n .await?;\n Ok(())\n }\n}\n\npub async fn evaluate_and_dump(\n plan: &ExecutionPlan,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n schema: &schema::FlowSchema,\n options: EvaluateAndDumpOptions,\n pool: &PgPool,\n) -> Result<()> {\n let output_dir = Path::new(&options.output_dir);\n if output_dir.exists() {\n if !output_dir.is_dir() {\n return Err(anyhow::anyhow!(\"The path exists and is not a directory\"));\n 
}\n } else {\n tokio::fs::create_dir(output_dir).await?;\n }\n\n let dumper = Dumper {\n plan,\n setup_execution_ctx,\n schema,\n pool,\n options,\n };\n dumper.evaluate_and_dump().await\n}\n"], ["/cocoindex/src/ops/sources/amazon_s3.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse aws_config::BehaviorVersion;\nuse aws_sdk_s3::Client;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\nuse urlencoding;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n/// Decode a form-encoded URL string, treating '+' as spaces\nfn decode_form_encoded_url(input: &str) -> Result> {\n // Replace '+' with spaces (form encoding convention), then decode\n // This handles both cases correctly:\n // - Literal '+' would be encoded as '%2B' and remain unchanged after replacement\n // - Space would be encoded as '+' and become ' ' after replacement\n let with_spaces = input.replace(\"+\", \" \");\n Ok(urlencoding::decode(&with_spaces)?.into())\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n sqs_queue_url: Option,\n}\n\nstruct SqsContext {\n client: aws_sdk_sqs::Client,\n queue_url: String,\n}\n\nimpl SqsContext {\n async fn delete_message(&self, receipt_handle: String) -> Result<()> {\n self.client\n .delete_message()\n .queue_url(&self.queue_url)\n .receipt_handle(receipt_handle)\n .send()\n .await?;\n Ok(())\n }\n}\n\nstruct Executor {\n client: Client,\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n sqs_context: Option>,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn 
datetime_to_ordinal(dt: &aws_sdk_s3::primitives::DateTime) -> Ordinal {\n Ordinal(Some((dt.as_nanos() / 1000) as i64))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token = None;\n loop {\n let mut req = self.client\n .list_objects_v2()\n .bucket(&self.bucket_name);\n if let Some(ref p) = self.prefix {\n req = req.prefix(p);\n }\n if let Some(ref token) = continuation_token {\n req = req.continuation_token(token);\n }\n let resp = req.send().await?;\n if let Some(contents) = &resp.contents {\n let mut batch = Vec::new();\n for obj in contents {\n if let Some(key) = obj.key() {\n // Only include files (not folders)\n if key.ends_with('/') { continue; }\n let include = self.included_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(true);\n let exclude = self.excluded_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(false);\n if include && !exclude {\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.to_string().into()),\n ordinal: obj.last_modified().map(datetime_to_ordinal),\n });\n }\n }\n }\n if !batch.is_empty() {\n yield batch;\n }\n }\n if resp.is_truncated == Some(true) {\n continuation_token = resp.next_continuation_token.clone().map(|s| s.to_string());\n } else {\n break;\n }\n }\n }.boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let resp = self\n .client\n .get_object()\n .bucket(&self.bucket_name)\n .key(key_str.as_ref())\n .send()\n .await;\n let obj = match resp {\n Err(e) if e.as_service_error().is_some_and(|e| e.is_no_such_key()) => {\n return Ok(PartialSourceRowData {\n value: 
Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n r => r?,\n };\n let ordinal = if options.include_ordinal {\n obj.last_modified().map(datetime_to_ordinal)\n } else {\n None\n };\n let value = if options.include_value {\n let bytes = obj.body.collect().await?.into_bytes();\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes.to_vec())\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let sqs_context = if let Some(sqs_context) = &self.sqs_context {\n sqs_context\n } else {\n return Ok(None);\n };\n let stream = stream! {\n loop {\n match self.poll_sqs(sqs_context).await {\n Ok(messages) => {\n for message in messages {\n yield Ok(message);\n }\n }\n Err(e) => {\n yield Err(e);\n }\n };\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventNotification {\n #[serde(default, rename = \"Records\")]\n pub records: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventRecord {\n #[serde(rename = \"eventName\")]\n pub event_name: String,\n pub s3: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Entity {\n pub bucket: S3Bucket,\n pub object: S3Object,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Bucket {\n pub name: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Object {\n pub key: String,\n}\n\nimpl Executor {\n async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = 
serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n .is_none_or(|prefix| s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AmazonS3\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n let config = 
aws_config::load_defaults(BehaviorVersion::latest()).await;\n Ok(Box::new(Executor {\n client: Client::new(&config),\n bucket_name: spec.bucket_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n sqs_context: spec.sqs_queue_url.map(|url| {\n Arc::new(SqsContext {\n client: aws_sdk_sqs::Client::new(&config),\n queue_url: url,\n })\n }),\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/base/value.rs", "use super::schema::*;\nuse crate::base::duration::parse_duration;\nuse crate::prelude::invariance_violation;\nuse crate::{api_bail, api_error};\nuse anyhow::Result;\nuse base64::prelude::*;\nuse bytes::Bytes;\nuse chrono::Offset;\nuse log::warn;\nuse serde::{\n Deserialize, Serialize,\n de::{SeqAccess, Visitor},\n ser::{SerializeMap, SerializeSeq, SerializeTuple},\n};\nuse std::{collections::BTreeMap, ops::Deref, sync::Arc};\n\npub trait EstimatedByteSize: Sized {\n fn estimated_detached_byte_size(&self) -> usize;\n\n fn estimated_byte_size(&self) -> usize {\n self.estimated_detached_byte_size() + std::mem::size_of::()\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct RangeValue {\n pub start: usize,\n pub end: usize,\n}\n\nimpl RangeValue {\n pub fn new(start: usize, end: usize) -> Self {\n RangeValue { start, end }\n }\n\n pub fn len(&self) -> usize {\n self.end - self.start\n }\n\n pub fn extract_str<'s>(&self, s: &'s (impl AsRef + ?Sized)) -> &'s str {\n let s = s.as_ref();\n &s[self.start..self.end]\n }\n}\n\nimpl Serialize for RangeValue {\n fn serialize(&self, serializer: S) -> Result {\n let mut tuple = serializer.serialize_tuple(2)?;\n tuple.serialize_element(&self.start)?;\n 
tuple.serialize_element(&self.end)?;\n tuple.end()\n }\n}\n\nimpl<'de> Deserialize<'de> for RangeValue {\n fn deserialize>(deserializer: D) -> Result {\n struct RangeVisitor;\n\n impl<'de> Visitor<'de> for RangeVisitor {\n type Value = RangeValue;\n\n fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n formatter.write_str(\"a tuple of two u64\")\n }\n\n fn visit_seq(self, mut seq: V) -> Result\n where\n V: SeqAccess<'de>,\n {\n let start = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing begin\"))?;\n let end = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing end\"))?;\n Ok(RangeValue { start, end })\n }\n }\n deserializer.deserialize_tuple(2, RangeVisitor)\n }\n}\n\n/// Value of key.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]\npub enum KeyValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Struct(Vec),\n}\n\nimpl From for KeyValue {\n fn from(value: Bytes) -> Self {\n KeyValue::Bytes(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Arc) -> Self {\n KeyValue::Str(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: String) -> Self {\n KeyValue::Str(Arc::from(value))\n }\n}\n\nimpl From for KeyValue {\n fn from(value: bool) -> Self {\n KeyValue::Bool(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: i64) -> Self {\n KeyValue::Int64(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: RangeValue) -> Self {\n KeyValue::Range(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: uuid::Uuid) -> Self {\n KeyValue::Uuid(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: chrono::NaiveDate) -> Self {\n KeyValue::Date(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Struct(value)\n 
}\n}\n\nimpl serde::Serialize for KeyValue {\n fn serialize(&self, serializer: S) -> Result {\n Value::from(self.clone()).serialize(serializer)\n }\n}\n\nimpl std::fmt::Display for KeyValue {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n KeyValue::Bytes(v) => write!(f, \"{}\", BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => write!(f, \"\\\"{}\\\"\", v.escape_default()),\n KeyValue::Bool(v) => write!(f, \"{v}\"),\n KeyValue::Int64(v) => write!(f, \"{v}\"),\n KeyValue::Range(v) => write!(f, \"[{}, {})\", v.start, v.end),\n KeyValue::Uuid(v) => write!(f, \"{v}\"),\n KeyValue::Date(v) => write!(f, \"{v}\"),\n KeyValue::Struct(v) => {\n write!(\n f,\n \"[{}]\",\n v.iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\", \")\n )\n }\n }\n }\n}\n\nimpl KeyValue {\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n let value = if fields_schema.len() == 1 {\n Value::from_json(value, &fields_schema[0].value_type.typ)?\n } else {\n let field_values: FieldValues = FieldValues::from_json(value, fields_schema)?;\n Value::Struct(field_values)\n };\n value.as_key()\n }\n\n pub fn from_values<'a>(values: impl ExactSizeIterator) -> Result {\n let key = if values.len() == 1 {\n let mut values = values;\n values.next().ok_or_else(invariance_violation)?.as_key()?\n } else {\n KeyValue::Struct(values.map(|v| v.as_key()).collect::>>()?)\n };\n Ok(key)\n }\n\n pub fn fields_iter(&self, num_fields: usize) -> Result> {\n let slice = if num_fields == 1 {\n std::slice::from_ref(self)\n } else {\n match self {\n KeyValue::Struct(v) => v,\n _ => api_bail!(\"Invalid key value type\"),\n }\n };\n Ok(slice.iter())\n }\n\n fn parts_from_str(\n values_iter: &mut impl Iterator,\n schema: &ValueType,\n ) -> Result {\n let result = match schema {\n ValueType::Basic(basic_type) => {\n let v = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n match basic_type {\n 
BasicValueType::Bytes => {\n KeyValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n BasicValueType::Str => KeyValue::Str(Arc::from(v)),\n BasicValueType::Bool => KeyValue::Bool(v.parse()?),\n BasicValueType::Int64 => KeyValue::Int64(v.parse()?),\n BasicValueType::Range => {\n let v2 = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n KeyValue::Range(RangeValue {\n start: v.parse()?,\n end: v2.parse()?,\n })\n }\n BasicValueType::Uuid => KeyValue::Uuid(v.parse()?),\n BasicValueType::Date => KeyValue::Date(v.parse()?),\n schema => api_bail!(\"Invalid key type {schema}\"),\n }\n }\n ValueType::Struct(s) => KeyValue::Struct(\n s.fields\n .iter()\n .map(|f| KeyValue::parts_from_str(values_iter, &f.value_type.typ))\n .collect::>>()?,\n ),\n _ => api_bail!(\"Invalid key type {schema}\"),\n };\n Ok(result)\n }\n\n fn parts_to_strs(&self, output: &mut Vec) {\n match self {\n KeyValue::Bytes(v) => output.push(BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => output.push(v.to_string()),\n KeyValue::Bool(v) => output.push(v.to_string()),\n KeyValue::Int64(v) => output.push(v.to_string()),\n KeyValue::Range(v) => {\n output.push(v.start.to_string());\n output.push(v.end.to_string());\n }\n KeyValue::Uuid(v) => output.push(v.to_string()),\n KeyValue::Date(v) => output.push(v.to_string()),\n KeyValue::Struct(v) => {\n for part in v {\n part.parts_to_strs(output);\n }\n }\n }\n }\n\n pub fn from_strs(value: impl IntoIterator, schema: &ValueType) -> Result {\n let mut values_iter = value.into_iter();\n let result = Self::parts_from_str(&mut values_iter, schema)?;\n if values_iter.next().is_some() {\n api_bail!(\"Key parts more than expected\");\n }\n Ok(result)\n }\n\n pub fn to_strs(&self) -> Vec {\n let mut output = Vec::with_capacity(self.num_parts());\n self.parts_to_strs(&mut output);\n output\n }\n\n pub fn kind_str(&self) -> &'static str {\n match self {\n KeyValue::Bytes(_) => \"bytes\",\n KeyValue::Str(_) => \"str\",\n 
KeyValue::Bool(_) => \"bool\",\n KeyValue::Int64(_) => \"int64\",\n KeyValue::Range { .. } => \"range\",\n KeyValue::Uuid(_) => \"uuid\",\n KeyValue::Date(_) => \"date\",\n KeyValue::Struct(_) => \"struct\",\n }\n }\n\n pub fn bytes_value(&self) -> Result<&Bytes> {\n match self {\n KeyValue::Bytes(v) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn str_value(&self) -> Result<&Arc> {\n match self {\n KeyValue::Str(v) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn bool_value(&self) -> Result {\n match self {\n KeyValue::Bool(v) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn int64_value(&self) -> Result {\n match self {\n KeyValue::Int64(v) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn range_value(&self) -> Result {\n match self {\n KeyValue::Range(v) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn uuid_value(&self) -> Result {\n match self {\n KeyValue::Uuid(v) => Ok(*v),\n _ => anyhow::bail!(\"expected uuid value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn date_value(&self) -> Result {\n match self {\n KeyValue::Date(v) => Ok(*v),\n _ => anyhow::bail!(\"expected date value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn struct_value(&self) -> Result<&Vec> {\n match self {\n KeyValue::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn num_parts(&self) -> usize {\n match self {\n KeyValue::Range(_) => 2,\n KeyValue::Struct(v) => v.iter().map(|v| v.num_parts()).sum(),\n _ => 1,\n }\n }\n\n fn estimated_detached_byte_size(&self) -> usize {\n match self {\n KeyValue::Bytes(v) => v.len(),\n KeyValue::Str(v) => v.len(),\n KeyValue::Struct(v) => {\n v.iter()\n .map(KeyValue::estimated_detached_byte_size)\n 
.sum::()\n + v.len() * std::mem::size_of::()\n }\n KeyValue::Bool(_)\n | KeyValue::Int64(_)\n | KeyValue::Range(_)\n | KeyValue::Uuid(_)\n | KeyValue::Date(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub enum BasicValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Float32(f32),\n Float64(f64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Time(chrono::NaiveTime),\n LocalDateTime(chrono::NaiveDateTime),\n OffsetDateTime(chrono::DateTime),\n TimeDelta(chrono::Duration),\n Json(Arc),\n Vector(Arc<[BasicValue]>),\n UnionVariant {\n tag_id: usize,\n value: Box,\n },\n}\n\nimpl From for BasicValue {\n fn from(value: Bytes) -> Self {\n BasicValue::Bytes(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Arc) -> Self {\n BasicValue::Str(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: String) -> Self {\n BasicValue::Str(Arc::from(value))\n }\n}\n\nimpl From for BasicValue {\n fn from(value: bool) -> Self {\n BasicValue::Bool(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: i64) -> Self {\n BasicValue::Int64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f32) -> Self {\n BasicValue::Float32(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f64) -> Self {\n BasicValue::Float64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: uuid::Uuid) -> Self {\n BasicValue::Uuid(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDate) -> Self {\n BasicValue::Date(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveTime) -> Self {\n BasicValue::Time(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDateTime) -> Self {\n BasicValue::LocalDateTime(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: chrono::DateTime) -> Self {\n BasicValue::OffsetDateTime(value)\n 
}\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::Duration) -> Self {\n BasicValue::TimeDelta(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: serde_json::Value) -> Self {\n BasicValue::Json(Arc::from(value))\n }\n}\n\nimpl> From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Vector(Arc::from(\n value.into_iter().map(|v| v.into()).collect::>(),\n ))\n }\n}\n\nimpl BasicValue {\n pub fn into_key(self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v),\n BasicValue::Str(v) => KeyValue::Str(v),\n BasicValue::Bool(v) => KeyValue::Bool(v),\n BasicValue::Int64(v) => KeyValue::Int64(v),\n BasicValue::Range(v) => KeyValue::Range(v),\n BasicValue::Uuid(v) => KeyValue::Uuid(v),\n BasicValue::Date(v) => KeyValue::Date(v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. } => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v.clone()),\n BasicValue::Str(v) => KeyValue::Str(v.clone()),\n BasicValue::Bool(v) => KeyValue::Bool(*v),\n BasicValue::Int64(v) => KeyValue::Int64(*v),\n BasicValue::Range(v) => KeyValue::Range(*v),\n BasicValue::Uuid(v) => KeyValue::Uuid(*v),\n BasicValue::Date(v) => KeyValue::Date(*v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. 
} => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match &self {\n BasicValue::Bytes(_) => \"bytes\",\n BasicValue::Str(_) => \"str\",\n BasicValue::Bool(_) => \"bool\",\n BasicValue::Int64(_) => \"int64\",\n BasicValue::Float32(_) => \"float32\",\n BasicValue::Float64(_) => \"float64\",\n BasicValue::Range(_) => \"range\",\n BasicValue::Uuid(_) => \"uuid\",\n BasicValue::Date(_) => \"date\",\n BasicValue::Time(_) => \"time\",\n BasicValue::LocalDateTime(_) => \"local_datetime\",\n BasicValue::OffsetDateTime(_) => \"offset_datetime\",\n BasicValue::TimeDelta(_) => \"timedelta\",\n BasicValue::Json(_) => \"json\",\n BasicValue::Vector(_) => \"vector\",\n BasicValue::UnionVariant { .. } => \"union\",\n }\n }\n\n /// Returns the estimated byte size of the value, for detached data (i.e. allocated on heap).\n fn estimated_detached_byte_size(&self) -> usize {\n fn json_estimated_detached_byte_size(val: &serde_json::Value) -> usize {\n match val {\n serde_json::Value::String(s) => s.len(),\n serde_json::Value::Array(arr) => {\n arr.iter()\n .map(json_estimated_detached_byte_size)\n .sum::()\n + arr.len() * std::mem::size_of::()\n }\n serde_json::Value::Object(map) => map\n .iter()\n .map(|(k, v)| {\n std::mem::size_of::()\n + k.len()\n + json_estimated_detached_byte_size(v)\n })\n .sum(),\n serde_json::Value::Null\n | serde_json::Value::Bool(_)\n | serde_json::Value::Number(_) => 0,\n }\n }\n match self {\n BasicValue::Bytes(v) => v.len(),\n BasicValue::Str(v) => v.len(),\n BasicValue::Json(v) => json_estimated_detached_byte_size(v),\n BasicValue::Vector(v) => {\n v.iter()\n .map(BasicValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n BasicValue::UnionVariant { value, .. 
} => {\n value.estimated_detached_byte_size() + std::mem::size_of::()\n }\n BasicValue::Bool(_)\n | BasicValue::Int64(_)\n | BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Range(_)\n | BasicValue::Uuid(_)\n | BasicValue::Date(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Deserialize)]\npub enum Value {\n #[default]\n Null,\n Basic(BasicValue),\n Struct(FieldValues),\n UTable(Vec),\n KTable(BTreeMap),\n LTable(Vec),\n}\n\nimpl> From for Value {\n fn from(value: T) -> Self {\n Value::Basic(value.into())\n }\n}\n\nimpl From for Value {\n fn from(value: KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v)),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v)),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.into_iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From<&KeyValue> for Value {\n fn from(value: &KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v.clone())),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v.clone())),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(*v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(*v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(*v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(*v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(*v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From for Value {\n fn from(value: FieldValues) -> Self {\n 
Value::Struct(value)\n }\n}\n\nimpl> From> for Value {\n fn from(value: Option) -> Self {\n match value {\n Some(v) => v.into(),\n None => Value::Null,\n }\n }\n}\n\nimpl Value {\n pub fn from_alternative(value: Value) -> Self\n where\n AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .into_iter()\n .map(|v| Value::::from_alternative(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.into_iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.into_iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.into_iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn from_alternative_ref(value: &Value) -> Self\n where\n for<'a> &'a AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v.clone()),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .iter()\n .map(|v| Value::::from_alternative_ref(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn is_null(&self) -> bool {\n matches!(self, Value::Null)\n }\n\n pub fn into_key(self) -> Result {\n let result = match self {\n Value::Basic(v) => v.into_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .into_iter()\n .map(|v| v.into_key())\n .collect::>>()?,\n ),\n Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n Value::Basic(v) => v.as_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .iter()\n .map(|v| v.as_key())\n .collect::>>()?,\n ),\n 
Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match self {\n Value::Null => \"null\",\n Value::Basic(v) => v.kind(),\n Value::Struct(_) => \"Struct\",\n Value::UTable(_) => \"UTable\",\n Value::KTable(_) => \"KTable\",\n Value::LTable(_) => \"LTable\",\n }\n }\n\n pub fn optional(&self) -> Option<&Self> {\n match self {\n Value::Null => None,\n _ => Some(self),\n }\n }\n\n pub fn as_bytes(&self) -> Result<&Bytes> {\n match self {\n Value::Basic(BasicValue::Bytes(v)) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_str(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Str(v)) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_bool(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Bool(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_int64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Int64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float32(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float32(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float32 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_range(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Range(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_json(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Json(v)) => Ok(v),\n _ => anyhow::bail!(\"expected json value, but got {}\", self.kind()),\n }\n }\n\n 
pub fn as_vector(&self) -> Result<&Arc<[BasicValue]>> {\n match self {\n Value::Basic(BasicValue::Vector(v)) => Ok(v),\n _ => anyhow::bail!(\"expected vector value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_struct(&self) -> Result<&FieldValues> {\n match self {\n Value::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind()),\n }\n }\n}\n\nimpl Value {\n pub fn estimated_byte_size(&self) -> usize {\n std::mem::size_of::()\n + match self {\n Value::Null => 0,\n Value::Basic(v) => v.estimated_detached_byte_size(),\n Value::Struct(v) => v.estimated_detached_byte_size(),\n Value::UTable(v) | Value::LTable(v) => {\n v.iter()\n .map(|v| v.estimated_detached_byte_size())\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n Value::KTable(v) => {\n v.iter()\n .map(|(k, v)| {\n k.estimated_detached_byte_size() + v.estimated_detached_byte_size()\n })\n .sum::()\n + v.len() * std::mem::size_of::<(String, ScopeValue)>()\n }\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub struct FieldValues {\n pub fields: Vec>,\n}\n\nimpl EstimatedByteSize for FieldValues {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(Value::::estimated_byte_size)\n .sum::()\n + self.fields.len() * std::mem::size_of::>()\n }\n}\n\nimpl serde::Serialize for FieldValues {\n fn serialize(&self, serializer: S) -> Result {\n self.fields.serialize(serializer)\n }\n}\n\nimpl FieldValues\nwhere\n FieldValues: Into,\n{\n pub fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize(num_fields, Value::::Null);\n Self { fields }\n }\n\n fn from_json_values<'a>(\n fields: impl Iterator,\n ) -> Result {\n Ok(Self {\n fields: fields\n .map(|(s, v)| {\n let value = Value::::from_json(v, &s.value_type.typ)?;\n if value.is_null() && !s.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", s.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n fn 
from_json_object<'a>(\n values: serde_json::Map,\n fields_schema: impl Iterator,\n ) -> Result {\n let mut values = values;\n Ok(Self {\n fields: fields_schema\n .map(|field| {\n let value = match values.get_mut(&field.name) {\n Some(v) => {\n Value::::from_json(std::mem::take(v), &field.value_type.typ)?\n }\n None => Value::::default(),\n };\n if value.is_null() && !field.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", field.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n match value {\n serde_json::Value::Array(v) => {\n if v.len() != fields_schema.len() {\n api_bail!(\"unmatched value length\");\n }\n Self::from_json_values(fields_schema.iter().zip(v))\n }\n serde_json::Value::Object(v) => Self::from_json_object(v, fields_schema.iter()),\n _ => api_bail!(\"invalid value type\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct ScopeValue(pub FieldValues);\n\nimpl EstimatedByteSize for ScopeValue {\n fn estimated_detached_byte_size(&self) -> usize {\n self.0.estimated_detached_byte_size()\n }\n}\n\nimpl Deref for ScopeValue {\n type Target = FieldValues;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl From for ScopeValue {\n fn from(value: FieldValues) -> Self {\n Self(value)\n }\n}\n\nimpl serde::Serialize for BasicValue {\n fn serialize(&self, serializer: S) -> Result {\n match self {\n BasicValue::Bytes(v) => serializer.serialize_str(&BASE64_STANDARD.encode(v)),\n BasicValue::Str(v) => serializer.serialize_str(v),\n BasicValue::Bool(v) => serializer.serialize_bool(*v),\n BasicValue::Int64(v) => serializer.serialize_i64(*v),\n BasicValue::Float32(v) => serializer.serialize_f32(*v),\n BasicValue::Float64(v) => serializer.serialize_f64(*v),\n BasicValue::Range(v) => v.serialize(serializer),\n BasicValue::Uuid(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Date(v) => 
serializer.serialize_str(&v.to_string()),\n BasicValue::Time(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::LocalDateTime(v) => {\n serializer.serialize_str(&v.format(\"%Y-%m-%dT%H:%M:%S%.6f\").to_string())\n }\n BasicValue::OffsetDateTime(v) => {\n serializer.serialize_str(&v.to_rfc3339_opts(chrono::SecondsFormat::AutoSi, true))\n }\n BasicValue::TimeDelta(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Json(v) => v.serialize(serializer),\n BasicValue::Vector(v) => v.serialize(serializer),\n BasicValue::UnionVariant { tag_id, value } => {\n let mut s = serializer.serialize_tuple(2)?;\n s.serialize_element(tag_id)?;\n s.serialize_element(value)?;\n s.end()\n }\n }\n }\n}\n\nimpl BasicValue {\n pub fn from_json(value: serde_json::Value, schema: &BasicValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::String(v), BasicValueType::Bytes) => {\n BasicValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n (serde_json::Value::String(v), BasicValueType::Str) => BasicValue::Str(Arc::from(v)),\n (serde_json::Value::Bool(v), BasicValueType::Bool) => BasicValue::Bool(v),\n (serde_json::Value::Number(v), BasicValueType::Int64) => BasicValue::Int64(\n v.as_i64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid int64 value {v}\"))?,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float32) => BasicValue::Float32(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp32 value {v}\"))?\n as f32,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float64) => BasicValue::Float64(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp64 value {v}\"))?,\n ),\n (v, BasicValueType::Range) => BasicValue::Range(serde_json::from_value(v)?),\n (serde_json::Value::String(v), BasicValueType::Uuid) => BasicValue::Uuid(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Date) => BasicValue::Date(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Time) => BasicValue::Time(v.parse()?),\n 
(serde_json::Value::String(v), BasicValueType::LocalDateTime) => {\n BasicValue::LocalDateTime(v.parse()?)\n }\n (serde_json::Value::String(v), BasicValueType::OffsetDateTime) => {\n match chrono::DateTime::parse_from_rfc3339(&v) {\n Ok(dt) => BasicValue::OffsetDateTime(dt),\n Err(e) => {\n if let Ok(dt) = v.parse::() {\n warn!(\"Datetime without timezone offset, assuming UTC\");\n BasicValue::OffsetDateTime(chrono::DateTime::from_naive_utc_and_offset(\n dt,\n chrono::Utc.fix(),\n ))\n } else {\n Err(e)?\n }\n }\n }\n }\n (serde_json::Value::String(v), BasicValueType::TimeDelta) => {\n BasicValue::TimeDelta(parse_duration(&v)?)\n }\n (v, BasicValueType::Json) => BasicValue::Json(Arc::from(v)),\n (\n serde_json::Value::Array(v),\n BasicValueType::Vector(VectorTypeSchema { element_type, .. }),\n ) => {\n let vec = v\n .into_iter()\n .map(|v| BasicValue::from_json(v, element_type))\n .collect::>>()?;\n BasicValue::Vector(Arc::from(vec))\n }\n (v, BasicValueType::Union(typ)) => {\n let arr = match v {\n serde_json::Value::Array(arr) => arr,\n _ => anyhow::bail!(\"Invalid JSON value for union, expect array\"),\n };\n\n if arr.len() != 2 {\n anyhow::bail!(\n \"Invalid union tuple: expect 2 values, received {}\",\n arr.len()\n );\n }\n\n let mut obj_iter = arr.into_iter();\n\n // Take first element\n let tag_id = obj_iter\n .next()\n .and_then(|value| value.as_u64().map(|num_u64| num_u64 as usize))\n .unwrap();\n\n // Take second element\n let value = obj_iter.next().unwrap();\n\n let cur_type = typ\n .types\n .get(tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"No type in `tag_id` \\\"{tag_id}\\\" found\"))?;\n\n BasicValue::UnionVariant {\n tag_id,\n value: Box::new(BasicValue::from_json(value, cur_type)?),\n }\n }\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\nstruct TableEntry<'a>(&'a KeyValue, &'a ScopeValue);\n\nimpl serde::Serialize for Value {\n fn serialize(&self, serializer: S) 
-> Result {\n match self {\n Value::Null => serializer.serialize_none(),\n Value::Basic(v) => v.serialize(serializer),\n Value::Struct(v) => v.serialize(serializer),\n Value::UTable(v) => v.serialize(serializer),\n Value::KTable(m) => {\n let mut seq = serializer.serialize_seq(Some(m.len()))?;\n for (k, v) in m.iter() {\n seq.serialize_element(&TableEntry(k, v))?;\n }\n seq.end()\n }\n Value::LTable(v) => v.serialize(serializer),\n }\n }\n}\n\nimpl serde::Serialize for TableEntry<'_> {\n fn serialize(&self, serializer: S) -> Result {\n let &TableEntry(key, value) = self;\n let mut seq = serializer.serialize_seq(Some(value.0.fields.len() + 1))?;\n seq.serialize_element(key)?;\n for item in value.0.fields.iter() {\n seq.serialize_element(item)?;\n }\n seq.end()\n }\n}\n\nimpl Value\nwhere\n FieldValues: Into,\n{\n pub fn from_json(value: serde_json::Value, schema: &ValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::Null, _) => Value::::Null,\n (v, ValueType::Basic(t)) => Value::::Basic(BasicValue::from_json(v, t)?),\n (v, ValueType::Struct(s)) => {\n Value::::Struct(FieldValues::::from_json(v, &s.fields)?)\n }\n (serde_json::Value::Array(v), ValueType::Table(s)) => match s.kind {\n TableKind::UTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n TableKind::KTable => {\n let rows = v\n .into_iter()\n .map(|v| {\n let mut fields_iter = s.row.fields.iter();\n let key_field = fields_iter\n .next()\n .ok_or_else(|| api_error!(\"Empty struct field values\"))?;\n\n match v {\n serde_json::Value::Array(v) => {\n let mut field_vals_iter = v.into_iter();\n let key = Self::from_json(\n field_vals_iter.next().ok_or_else(|| {\n api_error!(\"Empty struct field values\")\n })?,\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_values(\n fields_iter.zip(field_vals_iter),\n )?;\n Ok((key, values.into()))\n }\n 
serde_json::Value::Object(mut v) => {\n let key = Self::from_json(\n std::mem::take(v.get_mut(&key_field.name).ok_or_else(\n || {\n api_error!(\n \"key field `{}` doesn't exist in value\",\n key_field.name\n )\n },\n )?),\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_object(v, fields_iter)?;\n Ok((key, values.into()))\n }\n _ => api_bail!(\"Table value must be a JSON array or object\"),\n }\n })\n .collect::>>()?;\n Value::KTable(rows)\n }\n TableKind::LTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n },\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct TypedValue<'a> {\n pub t: &'a ValueType,\n pub v: &'a Value,\n}\n\nimpl Serialize for TypedValue<'_> {\n fn serialize(&self, serializer: S) -> Result {\n match (self.t, self.v) {\n (_, Value::Null) => serializer.serialize_none(),\n (ValueType::Basic(t), v) => match t {\n BasicValueType::Union(_) => match v {\n Value::Basic(BasicValue::UnionVariant { value, .. 
}) => {\n value.serialize(serializer)\n }\n _ => Err(serde::ser::Error::custom(\n \"Unmatched union type and value for `TypedValue`\",\n )),\n },\n _ => v.serialize(serializer),\n },\n (ValueType::Struct(s), Value::Struct(field_values)) => TypedFieldsValue {\n schema: &s.fields,\n values_iter: field_values.fields.iter(),\n }\n .serialize(serializer),\n (ValueType::Table(c), Value::UTable(rows) | Value::LTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for row in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: row.fields.iter(),\n })?;\n }\n seq.end()\n }\n (ValueType::Table(c), Value::KTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for (k, v) in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: std::iter::once(&Value::from(k.clone()))\n .chain(v.fields.iter()),\n })?;\n }\n seq.end()\n }\n _ => Err(serde::ser::Error::custom(format!(\n \"Incompatible value type: {:?} {:?}\",\n self.t, self.v\n ))),\n }\n }\n}\n\npub struct TypedFieldsValue<'a, I: Iterator + Clone> {\n pub schema: &'a [FieldSchema],\n pub values_iter: I,\n}\n\nimpl<'a, I: Iterator + Clone> Serialize for TypedFieldsValue<'a, I> {\n fn serialize(&self, serializer: S) -> Result {\n let mut map = serializer.serialize_map(Some(self.schema.len()))?;\n let values_iter = self.values_iter.clone();\n for (field, value) in self.schema.iter().zip(values_iter) {\n map.serialize_entry(\n &field.name,\n &TypedValue {\n t: &field.value_type.typ,\n v: value,\n },\n )?;\n }\n map.end()\n }\n}\n\npub mod test_util {\n use super::*;\n\n pub fn seder_roundtrip(value: &Value, typ: &ValueType) -> Result {\n let json_value = serde_json::to_value(value)?;\n let roundtrip_value = Value::from_json(json_value, typ)?;\n Ok(roundtrip_value)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use std::collections::BTreeMap;\n\n #[test]\n fn test_estimated_byte_size_null() {\n let 
value = Value::::Null;\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_primitive() {\n // Test primitives that should have 0 detached byte size\n let value = Value::::Basic(BasicValue::Bool(true));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Int64(42));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Float64(3.14));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_string() {\n let test_str = \"hello world\";\n let value = Value::::Basic(BasicValue::Str(Arc::from(test_str)));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_str.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_bytes() {\n let test_bytes = b\"hello world\";\n let value = Value::::Basic(BasicValue::Bytes(Bytes::from(test_bytes.to_vec())));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_bytes.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_json() {\n let json_val = serde_json::json!({\"key\": \"value\", \"number\": 42});\n let value = Value::::Basic(BasicValue::Json(Arc::from(json_val)));\n let size = value.estimated_byte_size();\n\n // Should include the size of the JSON structure\n // The exact size depends on the internal JSON representation\n assert!(size > std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_vector() {\n let vec_elements = vec![\n BasicValue::Str(Arc::from(\"hello\")),\n BasicValue::Str(Arc::from(\"world\")),\n BasicValue::Int64(42),\n ];\n let value = Value::::Basic(BasicValue::Vector(Arc::from(vec_elements)));\n let size = 
value.estimated_byte_size();\n\n // Should include the size of the vector elements\n let expected_min_size = std::mem::size_of::>()\n + \"hello\".len()\n + \"world\".len()\n + 3 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_struct() {\n let fields = vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"test\"))),\n Value::::Basic(BasicValue::Int64(123)),\n ];\n let field_values = FieldValues { fields };\n let value = Value::::Struct(field_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"test\".len()\n + 2 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_utable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item2\",\n )))],\n }),\n ];\n let value = Value::::UTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"item1\".len()\n + \"item2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ltable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list2\",\n )))],\n }),\n ];\n let value = Value::::LTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"list1\".len()\n + \"list2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ktable() {\n let mut map = BTreeMap::new();\n map.insert(\n KeyValue::Str(Arc::from(\"key1\")),\n ScopeValue(FieldValues {\n fields: 
vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value1\",\n )))],\n }),\n );\n map.insert(\n KeyValue::Str(Arc::from(\"key2\")),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value2\",\n )))],\n }),\n );\n let value = Value::::KTable(map);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"key1\".len()\n + \"key2\".len()\n + \"value1\".len()\n + \"value2\".len()\n + 2 * std::mem::size_of::<(String, ScopeValue)>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_nested_struct() {\n let inner_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"inner\"))),\n Value::::Basic(BasicValue::Int64(456)),\n ],\n });\n\n let outer_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"outer\"))),\n inner_struct,\n ],\n });\n\n let size = outer_struct.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"outer\".len()\n + \"inner\".len()\n + 4 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_empty_collections() {\n // Empty UTable\n let value = Value::::UTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty LTable\n let value = Value::::LTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty KTable\n let value = Value::::KTable(BTreeMap::new());\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty Struct\n let value = Value::::Struct(FieldValues { fields: vec![] });\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/property_graph.rs", "use crate::prelude::*;\n\nuse crate::ops::sdk::{AuthEntryReference, 
FieldSchema};\n\n#[derive(Debug, Deserialize)]\npub struct TargetFieldMapping {\n pub source: spec::FieldName,\n\n /// Field name for the node in the Knowledge Graph.\n /// If unspecified, it's the same as `field_name`.\n #[serde(default)]\n pub target: Option,\n}\n\nimpl TargetFieldMapping {\n pub fn get_target(&self) -> &spec::FieldName {\n self.target.as_ref().unwrap_or(&self.source)\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodeFromFieldsSpec {\n pub label: String,\n pub fields: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodesSpec {\n pub label: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct RelationshipsSpec {\n pub rel_type: String,\n pub source: NodeFromFieldsSpec,\n pub target: NodeFromFieldsSpec,\n}\n\n#[derive(Debug, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum GraphElementMapping {\n Relationship(RelationshipsSpec),\n Node(NodesSpec),\n}\n\n#[derive(Debug, Deserialize)]\npub struct GraphDeclaration {\n pub nodes_label: String,\n\n #[serde(flatten)]\n pub index_options: spec::IndexOptions,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)]\npub enum ElementType {\n Node(String),\n Relationship(String),\n}\n\nimpl ElementType {\n pub fn label(&self) -> &str {\n match self {\n ElementType::Node(label) => label,\n ElementType::Relationship(label) => label,\n }\n }\n\n pub fn from_mapping_spec(spec: &GraphElementMapping) -> Self {\n match spec {\n GraphElementMapping::Relationship(spec) => {\n ElementType::Relationship(spec.rel_type.clone())\n }\n GraphElementMapping::Node(spec) => ElementType::Node(spec.label.clone()),\n }\n }\n\n pub fn matcher(&self, var_name: &str) -> String {\n match self {\n ElementType::Relationship(label) => format!(\"()-[{var_name}:{label}]->()\"),\n ElementType::Node(label) => format!(\"({var_name}:{label})\"),\n }\n }\n}\n\nimpl std::fmt::Display for ElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ElementType::Node(label) 
=> write!(f, \"Node(label:{label})\"),\n ElementType::Relationship(rel_type) => write!(f, \"Relationship(type:{rel_type})\"),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Derivative)]\n#[derivative(\n Clone(bound = \"\"),\n PartialEq(bound = \"\"),\n Eq(bound = \"\"),\n Hash(bound = \"\")\n)]\npub struct GraphElementType {\n #[serde(bound = \"\")]\n pub connection: AuthEntryReference,\n pub typ: ElementType,\n}\n\nimpl std::fmt::Display for GraphElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}/{}\", self.connection.key, self.typ)\n }\n}\n\npub struct GraphElementSchema {\n pub elem_type: ElementType,\n pub key_fields: Vec,\n pub value_fields: Vec,\n}\n\npub struct GraphElementInputFieldsIdx {\n pub key: Vec,\n pub value: Vec,\n}\n\nimpl GraphElementInputFieldsIdx {\n pub fn extract_key(&self, fields: &[value::Value]) -> Result {\n value::KeyValue::from_values(self.key.iter().map(|idx| &fields[*idx]))\n }\n}\n\npub struct AnalyzedGraphElementFieldMapping {\n pub schema: Arc,\n pub fields_input_idx: GraphElementInputFieldsIdx,\n}\n\nimpl AnalyzedGraphElementFieldMapping {\n pub fn has_value_fields(&self) -> bool {\n !self.fields_input_idx.value.is_empty()\n }\n}\n\npub struct AnalyzedRelationshipInfo {\n pub source: AnalyzedGraphElementFieldMapping,\n pub target: AnalyzedGraphElementFieldMapping,\n}\n\npub struct AnalyzedDataCollection {\n pub schema: Arc,\n pub value_fields_input_idx: Vec,\n\n pub rel: Option,\n}\n\nimpl AnalyzedDataCollection {\n pub fn dependent_node_labels(&self) -> IndexSet<&str> {\n let mut dependent_node_labels = IndexSet::new();\n if let Some(rel) = &self.rel {\n dependent_node_labels.insert(rel.source.schema.elem_type.label());\n dependent_node_labels.insert(rel.target.schema.elem_type.label());\n }\n dependent_node_labels\n }\n}\n\nstruct GraphElementSchemaBuilder {\n elem_type: ElementType,\n key_fields: Vec,\n value_fields: Vec,\n}\n\nimpl GraphElementSchemaBuilder {\n fn 
new(elem_type: ElementType) -> Self {\n Self {\n elem_type,\n key_fields: vec![],\n value_fields: vec![],\n }\n }\n\n fn merge_fields(\n elem_type: &ElementType,\n kind: &str,\n existing_fields: &mut Vec,\n fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result> {\n if fields.is_empty() {\n return Ok(vec![]);\n }\n let result: Vec = if existing_fields.is_empty() {\n let fields_idx: Vec = fields.iter().map(|(idx, _)| *idx).collect();\n existing_fields.extend(fields.into_iter().map(|(_, f)| f));\n fields_idx\n } else {\n if existing_fields.len() != fields.len() {\n bail!(\n \"{elem_type} {kind} fields number mismatch: {} vs {}\",\n existing_fields.len(),\n fields.len()\n );\n }\n let mut fields_map: HashMap<_, _> = fields\n .into_iter()\n .map(|(idx, schema)| (schema.name, (idx, schema.value_type)))\n .collect();\n // Follow the order of existing fields\n existing_fields\n .iter()\n .map(|existing_field| {\n let (idx, typ) = fields_map.remove(&existing_field.name).ok_or_else(|| {\n anyhow!(\n \"{elem_type} {kind} field `{}` not found in some collector\",\n existing_field.name\n )\n })?;\n if typ != existing_field.value_type {\n bail!(\n \"{elem_type} {kind} field `{}` type mismatch: {} vs {}\",\n existing_field.name,\n typ,\n existing_field.value_type\n )\n }\n Ok(idx)\n })\n .collect::>>()?\n };\n Ok(result)\n }\n\n fn merge(\n &mut self,\n key_fields: Vec<(usize, schema::FieldSchema)>,\n value_fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result {\n let key_fields_idx =\n Self::merge_fields(&self.elem_type, \"key\", &mut self.key_fields, key_fields)?;\n let value_fields_idx = Self::merge_fields(\n &self.elem_type,\n \"value\",\n &mut self.value_fields,\n value_fields,\n )?;\n Ok(GraphElementInputFieldsIdx {\n key: key_fields_idx,\n value: value_fields_idx,\n })\n }\n\n fn build_schema(self) -> Result {\n if self.key_fields.is_empty() {\n bail!(\n \"No key fields specified for Node label `{}`\",\n self.elem_type\n );\n }\n Ok(GraphElementSchema {\n elem_type: 
self.elem_type,\n key_fields: self.key_fields,\n value_fields: self.value_fields,\n })\n }\n}\nstruct DependentNodeLabelAnalyzer<'a, AuthEntry> {\n graph_elem_type: GraphElementType,\n fields: IndexMap,\n remaining_fields: HashMap<&'a str, &'a TargetFieldMapping>,\n primary_key_fields: &'a [String],\n}\n\nimpl<'a, AuthEntry> DependentNodeLabelAnalyzer<'a, AuthEntry> {\n fn new(\n conn: &'a spec::AuthEntryReference,\n rel_end_spec: &'a NodeFromFieldsSpec,\n primary_key_fields_map: &'a HashMap<&'a GraphElementType, &'a [String]>,\n ) -> Result {\n let graph_elem_type = GraphElementType {\n connection: conn.clone(),\n typ: ElementType::Node(rel_end_spec.label.clone()),\n };\n let primary_key_fields = primary_key_fields_map\n .get(&graph_elem_type)\n .ok_or_else(invariance_violation)?;\n Ok(Self {\n graph_elem_type,\n fields: IndexMap::new(),\n remaining_fields: rel_end_spec\n .fields\n .iter()\n .map(|f| (f.source.as_str(), f))\n .collect(),\n primary_key_fields,\n })\n }\n\n fn process_field(&mut self, field_idx: usize, field_schema: &schema::FieldSchema) -> bool {\n let field_mapping = match self.remaining_fields.remove(field_schema.name.as_str()) {\n Some(field_mapping) => field_mapping,\n None => return false,\n };\n self.fields.insert(\n field_mapping.get_target().clone(),\n (field_idx, field_schema.value_type.clone()),\n );\n true\n }\n\n fn build(\n self,\n schema_builders: &mut HashMap, GraphElementSchemaBuilder>,\n ) -> Result<(GraphElementType, GraphElementInputFieldsIdx)> {\n if !self.remaining_fields.is_empty() {\n anyhow::bail!(\n \"Fields not mapped for {}: {}\",\n self.graph_elem_type,\n self.remaining_fields.keys().join(\", \")\n );\n }\n\n let (mut key_fields, value_fields): (Vec<_>, Vec<_>) = self\n .fields\n .into_iter()\n .map(|(field_name, (idx, typ))| (idx, FieldSchema::new(field_name, typ)))\n .partition(|(_, f)| self.primary_key_fields.contains(&f.name));\n if key_fields.len() != self.primary_key_fields.len() {\n bail!(\n \"Primary key fields 
number mismatch: {} vs {}\",\n key_fields.iter().map(|(_, f)| &f.name).join(\", \"),\n self.primary_key_fields.iter().join(\", \")\n );\n }\n key_fields.sort_by_key(|(_, f)| {\n self.primary_key_fields\n .iter()\n .position(|k| k == &f.name)\n .unwrap()\n });\n\n let fields_idx = schema_builders\n .entry(self.graph_elem_type.clone())\n .or_insert_with(|| GraphElementSchemaBuilder::new(self.graph_elem_type.typ.clone()))\n .merge(key_fields, value_fields)?;\n Ok((self.graph_elem_type, fields_idx))\n }\n}\n\npub struct DataCollectionGraphMappingInput<'a, AuthEntry> {\n pub auth_ref: &'a spec::AuthEntryReference,\n pub mapping: &'a GraphElementMapping,\n pub index_options: &'a spec::IndexOptions,\n\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n}\n\npub fn analyze_graph_mappings<'a, AuthEntry: 'a>(\n data_coll_inputs: impl Iterator>,\n declarations: impl Iterator<\n Item = (\n &'a spec::AuthEntryReference,\n &'a GraphDeclaration,\n ),\n >,\n) -> Result<(Vec, Vec>)> {\n let data_coll_inputs: Vec<_> = data_coll_inputs.collect();\n let decls: Vec<_> = declarations.collect();\n\n // 1a. Prepare graph element types\n let graph_elem_types = data_coll_inputs\n .iter()\n .map(|d| GraphElementType {\n connection: d.auth_ref.clone(),\n typ: ElementType::from_mapping_spec(d.mapping),\n })\n .collect::>();\n let decl_graph_elem_types = decls\n .iter()\n .map(|(auth_ref, decl)| GraphElementType {\n connection: (*auth_ref).clone(),\n typ: ElementType::Node(decl.nodes_label.clone()),\n })\n .collect::>();\n\n // 1b. 
Prepare primary key fields map\n let primary_key_fields_map: HashMap<&GraphElementType, &[spec::FieldName]> =\n std::iter::zip(data_coll_inputs.iter(), graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| {\n (\n graph_elem_type,\n data_coll_input.index_options.primary_key_fields(),\n )\n })\n .chain(\n std::iter::zip(decl_graph_elem_types.iter(), decls.iter()).map(\n |(graph_elem_type, (_, decl))| {\n (graph_elem_type, decl.index_options.primary_key_fields())\n },\n ),\n )\n .map(|(graph_elem_type, primary_key_fields)| {\n Ok((\n graph_elem_type,\n primary_key_fields.with_context(|| {\n format!(\"Primary key fields are not set for {graph_elem_type}\")\n })?,\n ))\n })\n .collect::>()?;\n\n // 2. Analyze data collection graph mappings and build target schema\n let mut node_schema_builders =\n HashMap::, GraphElementSchemaBuilder>::new();\n struct RelationshipProcessedInfo {\n rel_schema: GraphElementSchema,\n source_typ: GraphElementType,\n source_fields_idx: GraphElementInputFieldsIdx,\n target_typ: GraphElementType,\n target_fields_idx: GraphElementInputFieldsIdx,\n }\n struct DataCollectionProcessedInfo {\n value_input_fields_idx: Vec,\n rel_specific: Option>,\n }\n let data_collection_processed_info = std::iter::zip(data_coll_inputs, graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| -> Result<_> {\n let processed_info = match data_coll_input.mapping {\n GraphElementMapping::Node(_) => {\n let input_fields_idx = node_schema_builders\n .entry(graph_elem_type.clone())\n .or_insert_with_key(|graph_elem| {\n GraphElementSchemaBuilder::new(graph_elem.typ.clone())\n })\n .merge(\n data_coll_input\n .key_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n data_coll_input\n .value_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n )?;\n\n if !(0..input_fields_idx.key.len()).eq(input_fields_idx.key.into_iter()) {\n return Err(invariance_violation());\n }\n DataCollectionProcessedInfo {\n value_input_fields_idx: 
input_fields_idx.value,\n rel_specific: None,\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let mut src_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.source,\n &primary_key_fields_map,\n )?;\n let mut tgt_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.target,\n &primary_key_fields_map,\n )?;\n\n let mut value_fields_schema = vec![];\n let mut value_input_fields_idx = vec![];\n for (field_idx, field_schema) in\n data_coll_input.value_fields_schema.into_iter().enumerate()\n {\n if !src_analyzer.process_field(field_idx, &field_schema)\n && !tgt_analyzer.process_field(field_idx, &field_schema)\n {\n value_fields_schema.push(field_schema.clone());\n value_input_fields_idx.push(field_idx);\n }\n }\n\n let rel_schema = GraphElementSchema {\n elem_type: graph_elem_type.typ.clone(),\n key_fields: data_coll_input.key_fields_schema,\n value_fields: value_fields_schema,\n };\n let (source_typ, source_fields_idx) =\n src_analyzer.build(&mut node_schema_builders)?;\n let (target_typ, target_fields_idx) =\n tgt_analyzer.build(&mut node_schema_builders)?;\n DataCollectionProcessedInfo {\n value_input_fields_idx,\n rel_specific: Some(RelationshipProcessedInfo {\n rel_schema,\n source_typ,\n source_fields_idx,\n target_typ,\n target_fields_idx,\n }),\n }\n }\n };\n Ok(processed_info)\n })\n .collect::>>()?;\n\n let node_schemas: HashMap, Arc> =\n node_schema_builders\n .into_iter()\n .map(|(graph_elem_type, schema_builder)| {\n Ok((graph_elem_type, Arc::new(schema_builder.build_schema()?)))\n })\n .collect::>()?;\n\n // 3. 
Build output\n let analyzed_data_colls: Vec =\n std::iter::zip(data_collection_processed_info, graph_elem_types.iter())\n .map(|(processed_info, graph_elem_type)| {\n let result = match processed_info.rel_specific {\n // Node\n None => AnalyzedDataCollection {\n schema: node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone(),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: None,\n },\n // Relationship\n Some(rel_info) => AnalyzedDataCollection {\n schema: Arc::new(rel_info.rel_schema),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: Some(AnalyzedRelationshipInfo {\n source: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.source_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.source_fields_idx,\n },\n target: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.target_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.target_fields_idx,\n },\n }),\n },\n };\n Ok(result)\n })\n .collect::>()?;\n let decl_schemas: Vec> = decl_graph_elem_types\n .iter()\n .map(|graph_elem_type| {\n Ok(node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone())\n })\n .collect::>()?;\n Ok((analyzed_data_colls, decl_schemas))\n}\n"], ["/cocoindex/src/base/json_schema.rs", "use crate::prelude::*;\n\nuse crate::utils::immutable::RefList;\nuse schemars::schema::{\n ArrayValidation, InstanceType, ObjectValidation, Schema, SchemaObject, SingleOrVec,\n SubschemaValidation,\n};\nuse std::fmt::Write;\n\npub struct ToJsonSchemaOptions {\n /// If true, mark all fields as required.\n /// Use union type (with `null`) for optional fields instead.\n /// Models like OpenAI will reject the schema if a field is not required.\n pub fields_always_required: bool,\n\n /// If true, the JSON schema supports the `format` keyword.\n pub supports_format: bool,\n\n /// If true, extract descriptions 
to a separate extra instruction.\n pub extract_descriptions: bool,\n\n /// If true, the top level must be a JSON object.\n pub top_level_must_be_object: bool,\n}\n\nstruct JsonSchemaBuilder {\n options: ToJsonSchemaOptions,\n extra_instructions_per_field: IndexMap,\n}\n\nimpl JsonSchemaBuilder {\n fn new(options: ToJsonSchemaOptions) -> Self {\n Self {\n options,\n extra_instructions_per_field: IndexMap::new(),\n }\n }\n\n fn set_description(\n &mut self,\n schema: &mut SchemaObject,\n description: impl ToString,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) {\n if self.options.extract_descriptions {\n let mut fields: Vec<_> = field_path.iter().map(|f| f.as_str()).collect();\n fields.reverse();\n self.extra_instructions_per_field\n .insert(fields.join(\".\"), description.to_string());\n } else {\n schema.metadata.get_or_insert_default().description = Some(description.to_string());\n }\n }\n\n fn for_basic_value_type(\n &mut self,\n basic_type: &schema::BasicValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n match basic_type {\n schema::BasicValueType::Str => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bytes => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bool => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Boolean)));\n }\n schema::BasicValueType::Int64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Integer)));\n }\n schema::BasicValueType::Float32 | schema::BasicValueType::Float64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Number)));\n }\n schema::BasicValueType::Range => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n 
SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(\n InstanceType::Integer,\n ))),\n ..Default::default()\n }\n .into(),\n ))),\n min_items: Some(2),\n max_items: Some(2),\n ..Default::default()\n }));\n self.set_description(\n &mut schema,\n \"A range represented by a list of two positions, start pos (inclusive), end pos (exclusive).\",\n field_path,\n );\n }\n schema::BasicValueType::Uuid => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"uuid\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A UUID, e.g. 123e4567-e89b-12d3-a456-426614174000\",\n field_path,\n );\n }\n schema::BasicValueType::Date => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A date in YYYY-MM-DD format, e.g. 2025-03-27\",\n field_path,\n );\n }\n schema::BasicValueType::Time => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A time in HH:MM:SS format, e.g. 13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::LocalDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time without timezone offset in YYYY-MM-DDTHH:MM:SS format, e.g. 
2025-03-27T13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::OffsetDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time with timezone offset in RFC3339, e.g. 2025-03-27T13:32:12Z, 2025-03-27T07:32:12.313-06:00\",\n field_path,\n );\n }\n &schema::BasicValueType::TimeDelta => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"duration\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A duration, e.g. 'PT1H2M3S' (ISO 8601) or '1 day 2 hours 3 seconds'\",\n field_path,\n );\n }\n schema::BasicValueType::Json => {\n // Can be any value. No type constraint.\n }\n schema::BasicValueType::Vector(s) => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_basic_value_type(&s.element_type, field_path)\n .into(),\n ))),\n min_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n max_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n ..Default::default()\n }));\n }\n schema::BasicValueType::Union(s) => {\n schema.subschemas = Some(Box::new(SubschemaValidation {\n one_of: Some(\n s.types\n .iter()\n .map(|t| Schema::Object(self.for_basic_value_type(t, field_path)))\n .collect(),\n ),\n ..Default::default()\n }));\n }\n }\n schema\n }\n\n fn for_struct_schema(\n &mut self,\n struct_schema: &schema::StructSchema,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n if let Some(description) = &struct_schema.description {\n self.set_description(&mut schema, description, field_path);\n }\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Object)));\n 
schema.object = Some(Box::new(ObjectValidation {\n properties: struct_schema\n .fields\n .iter()\n .map(|f| {\n let mut schema =\n self.for_enriched_value_type(&f.value_type, field_path.prepend(&f.name));\n if self.options.fields_always_required && f.value_type.nullable {\n if let Some(instance_type) = &mut schema.instance_type {\n let mut types = match instance_type {\n SingleOrVec::Single(t) => vec![**t],\n SingleOrVec::Vec(t) => std::mem::take(t),\n };\n types.push(InstanceType::Null);\n *instance_type = SingleOrVec::Vec(types);\n }\n }\n (f.name.to_string(), schema.into())\n })\n .collect(),\n required: struct_schema\n .fields\n .iter()\n .filter(|&f| (self.options.fields_always_required || !f.value_type.nullable))\n .map(|f| f.name.to_string())\n .collect(),\n additional_properties: Some(Schema::Bool(false).into()),\n ..Default::default()\n }));\n schema\n }\n\n fn for_value_type(\n &mut self,\n value_type: &schema::ValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n match value_type {\n schema::ValueType::Basic(b) => self.for_basic_value_type(b, field_path),\n schema::ValueType::Struct(s) => self.for_struct_schema(s, field_path),\n schema::ValueType::Table(c) => SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(InstanceType::Array))),\n array: Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_struct_schema(&c.row, field_path).into(),\n ))),\n ..Default::default()\n })),\n ..Default::default()\n },\n }\n }\n\n fn for_enriched_value_type(\n &mut self,\n enriched_value_type: &schema::EnrichedValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n self.for_value_type(&enriched_value_type.typ, field_path)\n }\n\n fn build_extra_instructions(&self) -> Result> {\n if self.extra_instructions_per_field.is_empty() {\n return Ok(None);\n }\n\n let mut instructions = String::new();\n write!(&mut instructions, \"Instructions for specific fields:\\n\\n\")?;\n for 
(field_path, instruction) in self.extra_instructions_per_field.iter() {\n write!(\n &mut instructions,\n \"- {}: {}\\n\\n\",\n if field_path.is_empty() {\n \"(root object)\"\n } else {\n field_path.as_str()\n },\n instruction\n )?;\n }\n Ok(Some(instructions))\n }\n}\n\npub struct ValueExtractor {\n value_type: schema::ValueType,\n object_wrapper_field_name: Option,\n}\n\nimpl ValueExtractor {\n pub fn extract_value(&self, json_value: serde_json::Value) -> Result {\n let unwrapped_json_value =\n if let Some(object_wrapper_field_name) = &self.object_wrapper_field_name {\n match json_value {\n serde_json::Value::Object(mut o) => o\n .remove(object_wrapper_field_name)\n .unwrap_or(serde_json::Value::Null),\n _ => {\n bail!(\"Field `{}` not found\", object_wrapper_field_name)\n }\n }\n } else {\n json_value\n };\n let result = value::Value::from_json(unwrapped_json_value, &self.value_type)?;\n Ok(result)\n }\n}\n\npub struct BuildJsonSchemaOutput {\n pub schema: SchemaObject,\n pub extra_instructions: Option,\n pub value_extractor: ValueExtractor,\n}\n\npub fn build_json_schema(\n value_type: schema::EnrichedValueType,\n options: ToJsonSchemaOptions,\n) -> Result {\n let mut builder = JsonSchemaBuilder::new(options);\n let (schema, object_wrapper_field_name) = if builder.options.top_level_must_be_object\n && !matches!(value_type.typ, schema::ValueType::Struct(_))\n {\n let object_wrapper_field_name = \"value\".to_string();\n let wrapper_struct = schema::StructSchema {\n fields: Arc::new(vec![schema::FieldSchema {\n name: object_wrapper_field_name.clone(),\n value_type: value_type.clone(),\n }]),\n description: None,\n };\n (\n builder.for_struct_schema(&wrapper_struct, RefList::Nil),\n Some(object_wrapper_field_name),\n )\n } else {\n (\n builder.for_enriched_value_type(&value_type, RefList::Nil),\n None,\n )\n };\n Ok(BuildJsonSchemaOutput {\n schema,\n extra_instructions: builder.build_extra_instructions()?,\n value_extractor: ValueExtractor {\n value_type: 
value_type.typ,\n object_wrapper_field_name,\n },\n })\n}\n"], ["/cocoindex/src/execution/evaluator.rs", "use crate::prelude::*;\n\nuse anyhow::{Context, Ok};\nuse futures::future::try_join_all;\n\nuse crate::base::value::EstimatedByteSize;\nuse crate::builder::{AnalyzedTransientFlow, plan::*};\nuse crate::py::IntoPyResult;\nuse crate::{\n base::{schema, value},\n utils::immutable::RefList,\n};\n\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, evaluate_with_cell};\n\n#[derive(Debug)]\npub struct ScopeValueBuilder {\n // TODO: Share the same lock for values produced in the same execution scope, for stricter atomicity.\n pub fields: Vec>>,\n}\n\nimpl value::EstimatedByteSize for ScopeValueBuilder {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }\n}\n\nimpl From<&ScopeValueBuilder> for value::ScopeValue {\n fn from(val: &ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .iter()\n .map(|f| value::Value::from_alternative_ref(f.get().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl From for value::ScopeValue {\n fn from(val: ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .into_iter()\n .map(|f| value::Value::from_alternative(f.into_inner().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl ScopeValueBuilder {\n fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize_with(num_fields, OnceLock::new);\n Self { fields }\n }\n\n fn augmented_from(source: &value::ScopeValue, schema: &schema::TableSchema) -> Result {\n let val_index_base = if schema.has_key() { 1 } else { 0 };\n let len = schema.row.fields.len() - val_index_base;\n\n let mut builder = Self::new(len);\n\n let value::ScopeValue(source_fields) = source;\n for ((v, t), r) in source_fields\n .fields\n .iter()\n .zip(schema.row.fields[val_index_base..(val_index_base + 
len)].iter())\n .zip(&mut builder.fields)\n {\n r.set(augmented_value(v, &t.value_type.typ)?)\n .into_py_result()?;\n }\n Ok(builder)\n }\n}\n\nfn augmented_value(\n val: &value::Value,\n val_type: &schema::ValueType,\n) -> Result> {\n let value = match (val, val_type) {\n (value::Value::Null, _) => value::Value::Null,\n (value::Value::Basic(v), _) => value::Value::Basic(v.clone()),\n (value::Value::Struct(v), schema::ValueType::Struct(t)) => {\n value::Value::Struct(value::FieldValues {\n fields: v\n .fields\n .iter()\n .enumerate()\n .map(|(i, v)| augmented_value(v, &t.fields[i].value_type.typ))\n .collect::>>()?,\n })\n }\n (value::Value::UTable(v), schema::ValueType::Table(t)) => value::Value::UTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (value::Value::KTable(v), schema::ValueType::Table(t)) => value::Value::KTable(\n v.iter()\n .map(|(k, v)| Ok((k.clone(), ScopeValueBuilder::augmented_from(v, t)?)))\n .collect::>>()?,\n ),\n (value::Value::LTable(v), schema::ValueType::Table(t)) => value::Value::LTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (val, _) => bail!(\"Value kind doesn't match the type {val_type}: {val:?}\"),\n };\n Ok(value)\n}\n\nenum ScopeKey<'a> {\n /// For root struct and UTable.\n None,\n /// For KTable row.\n MapKey(&'a value::KeyValue),\n /// For LTable row.\n ListIndex(usize),\n}\n\nimpl<'a> ScopeKey<'a> {\n pub fn key(&self) -> Option> {\n match self {\n ScopeKey::None => None,\n ScopeKey::MapKey(k) => Some(Cow::Borrowed(k)),\n ScopeKey::ListIndex(i) => Some(Cow::Owned(value::KeyValue::Int64(*i as i64))),\n }\n }\n\n pub fn value_field_index_base(&self) -> u32 {\n match *self {\n ScopeKey::None => 0,\n ScopeKey::MapKey(_) => 1,\n ScopeKey::ListIndex(_) => 0,\n }\n }\n}\n\nimpl std::fmt::Display for ScopeKey<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ScopeKey::None => write!(f, \"()\"),\n 
ScopeKey::MapKey(k) => write!(f, \"{{{k}}}\"),\n ScopeKey::ListIndex(i) => write!(f, \"[{i}]\"),\n }\n }\n}\n\nstruct ScopeEntry<'a> {\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n collected_values: Vec>>,\n}\n\nimpl<'a> ScopeEntry<'a> {\n fn new(\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n analyzed_op_scope: &AnalyzedOpScope,\n ) -> Self {\n let mut collected_values = Vec::with_capacity(analyzed_op_scope.collector_len);\n collected_values.resize_with(analyzed_op_scope.collector_len, Default::default);\n\n Self {\n key,\n value,\n schema,\n collected_values,\n }\n }\n\n fn get_local_field_schema<'b>(\n schema: &'b schema::StructSchema,\n indices: &[u32],\n ) -> Result<&'b schema::FieldSchema> {\n let field_idx = indices[0] as usize;\n let field_schema = &schema.fields[field_idx];\n let result = if indices.len() == 1 {\n field_schema\n } else {\n let struct_field_schema = match &field_schema.value_type.typ {\n schema::ValueType::Struct(s) => s,\n _ => bail!(\"Expect struct field\"),\n };\n Self::get_local_field_schema(struct_field_schema, &indices[1..])?\n };\n Ok(result)\n }\n\n fn get_local_key_field<'b>(\n key_val: &'b value::KeyValue,\n indices: &'_ [u32],\n ) -> &'b value::KeyValue {\n if indices.is_empty() {\n key_val\n } else if let value::KeyValue::Struct(fields) = key_val {\n Self::get_local_key_field(&fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_local_field<'b>(\n val: &'b value::Value,\n indices: &'_ [u32],\n ) -> &'b value::Value {\n if indices.is_empty() {\n val\n } else if let value::Value::Struct(fields) = val {\n Self::get_local_field(&fields.fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_value_field_builder(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> &value::Value {\n let 
first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n Self::get_local_field(val, &field_ref.fields_idx[1..])\n }\n\n fn get_field(&self, field_ref: &AnalyzedLocalFieldReference) -> value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n if first_index < index_base {\n let key_val = self.key.key().unwrap().into_owned();\n let key_part = Self::get_local_key_field(&key_val, &field_ref.fields_idx[1..]);\n key_part.clone().into()\n } else {\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n let val_part = Self::get_local_field(val, &field_ref.fields_idx[1..]);\n value::Value::from_alternative_ref(val_part)\n }\n }\n\n fn get_field_schema(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> Result<&schema::FieldSchema> {\n Ok(Self::get_local_field_schema(\n self.schema,\n &field_ref.fields_idx,\n )?)\n }\n\n fn define_field_w_builder(\n &self,\n output_field: &AnalyzedOpOutput,\n val: value::Value,\n ) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let index_base = self.key.value_field_index_base() as usize;\n self.value.fields[field_index - index_base].set(val).map_err(|_| {\n anyhow!(\"Field {field_index} for scope is already set, violating single-definition rule.\")\n })?;\n Ok(())\n }\n\n fn define_field(&self, output_field: &AnalyzedOpOutput, val: &value::Value) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let field_schema = &self.schema.fields[field_index];\n let val = augmented_value(val, &field_schema.value_type.typ)?;\n self.define_field_w_builder(output_field, val)?;\n Ok(())\n }\n}\n\nfn assemble_value(\n value_mapping: &AnalyzedValueMapping,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n) -> value::Value {\n match value_mapping {\n AnalyzedValueMapping::Constant { value } => 
value.clone(),\n AnalyzedValueMapping::Field(field_ref) => scoped_entries\n .headn(field_ref.scope_up_level as usize)\n .unwrap()\n .get_field(&field_ref.local),\n AnalyzedValueMapping::Struct(mapping) => {\n let fields = mapping\n .fields\n .iter()\n .map(|f| assemble_value(f, scoped_entries))\n .collect();\n value::Value::Struct(value::FieldValues { fields })\n }\n }\n}\n\nfn assemble_input_values<'a>(\n value_mappings: &'a [AnalyzedValueMapping],\n scoped_entries: RefList<'a, &ScopeEntry<'a>>,\n) -> impl Iterator + 'a {\n value_mappings\n .iter()\n .map(move |value_mapping| assemble_value(value_mapping, scoped_entries))\n}\n\nasync fn evaluate_child_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n child_scope_entry: ScopeEntry<'_>,\n concurrency_controller: &concur_control::ConcurrencyController,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let _permit = concurrency_controller\n .acquire(Some(|| {\n child_scope_entry\n .value\n .fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }))\n .await?;\n evaluate_op_scope(op_scope, scoped_entries.prepend(&child_scope_entry), memory)\n .await\n .with_context(|| {\n format!(\n \"Evaluating in scope with key {}\",\n match child_scope_entry.key.key() {\n Some(k) => k.to_string(),\n None => \"()\".to_string(),\n }\n )\n })\n}\n\nasync fn evaluate_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let head_scope = *scoped_entries.head().unwrap();\n for reactive_op in op_scope.reactive_ops.iter() {\n match reactive_op {\n AnalyzedReactiveOp::Transform(op) => {\n let mut input_values = Vec::with_capacity(op.inputs.len());\n input_values\n .extend(assemble_input_values(&op.inputs, scoped_entries).collect::>());\n let output_value_cell = memory.get_cache_entry(\n || {\n Ok(op\n .function_exec_info\n .fingerprinter\n .clone()\n .with(&input_values)?\n 
.into_fingerprint())\n },\n &op.function_exec_info.output_type,\n /*ttl=*/ None,\n )?;\n let output_value = evaluate_with_cell(output_value_cell.as_ref(), move || {\n op.executor.evaluate(input_values)\n })\n .await\n .with_context(|| format!(\"Evaluating Transform op `{}`\", op.name,))?;\n head_scope.define_field(&op.output, &output_value)?;\n }\n\n AnalyzedReactiveOp::ForEach(op) => {\n let target_field_schema = head_scope.get_field_schema(&op.local_field_ref)?;\n let table_schema = match &target_field_schema.value_type.typ {\n schema::ValueType::Table(cs) => cs,\n _ => bail!(\"Expect target field to be a table\"),\n };\n\n let target_field = head_scope.get_value_field_builder(&op.local_field_ref);\n let task_futs = match target_field {\n value::Value::UTable(v) => v\n .iter()\n .map(|item| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::None,\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::KTable(v) => v\n .iter()\n .map(|(k, v)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::MapKey(k),\n v,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::LTable(v) => v\n .iter()\n .enumerate()\n .map(|(i, item)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::ListIndex(i),\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n _ => {\n bail!(\"Target field type is expected to be a table\");\n }\n };\n try_join_all(task_futs)\n .await\n .with_context(|| format!(\"Evaluating ForEach op `{}`\", op.name,))?;\n }\n\n AnalyzedReactiveOp::Collect(op) => {\n let mut field_values = Vec::with_capacity(\n op.input.fields.len() + if op.has_auto_uuid_field { 1 } else { 0 },\n );\n let field_values_iter = 
assemble_input_values(&op.input.fields, scoped_entries);\n if op.has_auto_uuid_field {\n field_values.push(value::Value::Null);\n field_values.extend(field_values_iter);\n let uuid = memory.next_uuid(\n op.fingerprinter\n .clone()\n .with(&field_values[1..])?\n .into_fingerprint(),\n )?;\n field_values[0] = value::Value::Basic(value::BasicValue::Uuid(uuid));\n } else {\n field_values.extend(field_values_iter);\n };\n let collector_entry = scoped_entries\n .headn(op.collector_ref.scope_up_level as usize)\n .ok_or_else(|| anyhow::anyhow!(\"Collector level out of bound\"))?;\n {\n let mut collected_records = collector_entry.collected_values\n [op.collector_ref.local.collector_idx as usize]\n .lock()\n .unwrap();\n collected_records.push(value::FieldValues {\n fields: field_values,\n });\n }\n }\n }\n }\n Ok(())\n}\n\npub struct SourceRowEvaluationContext<'a> {\n pub plan: &'a ExecutionPlan,\n pub import_op: &'a AnalyzedImportOp,\n pub schema: &'a schema::FlowSchema,\n pub key: &'a value::KeyValue,\n pub import_op_idx: usize,\n}\n\n#[derive(Debug)]\npub struct EvaluateSourceEntryOutput {\n pub data_scope: ScopeValueBuilder,\n pub collected_values: Vec>,\n}\n\npub async fn evaluate_source_entry(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_value: value::FieldValues,\n memory: &EvaluationMemory,\n) -> Result {\n let _permit = src_eval_ctx\n .import_op\n .concurrency_controller\n .acquire_bytes_with_reservation(|| source_value.estimated_byte_size())\n .await?;\n let root_schema = &src_eval_ctx.schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &src_eval_ctx.plan.op_scope,\n );\n\n let table_schema = match &root_schema.fields[src_eval_ctx.import_op.output.field_idx as usize]\n .value_type\n .typ\n {\n schema::ValueType::Table(cs) => cs,\n _ => {\n bail!(\"Expect source output to be a table\")\n }\n };\n\n let scope_value =\n 
ScopeValueBuilder::augmented_from(&value::ScopeValue(source_value), table_schema)?;\n root_scope_entry.define_field_w_builder(\n &src_eval_ctx.import_op.output,\n value::Value::KTable(BTreeMap::from([(src_eval_ctx.key.clone(), scope_value)])),\n )?;\n\n evaluate_op_scope(\n &src_eval_ctx.plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n memory,\n )\n .await?;\n let collected_values = root_scope_entry\n .collected_values\n .into_iter()\n .map(|v| v.into_inner().unwrap())\n .collect::>();\n Ok(EvaluateSourceEntryOutput {\n data_scope: root_scope_value,\n collected_values,\n })\n}\n\npub async fn evaluate_transient_flow(\n flow: &AnalyzedTransientFlow,\n input_values: &Vec,\n) -> Result {\n let root_schema = &flow.data_schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &flow.execution_plan.op_scope,\n );\n\n if input_values.len() != flow.execution_plan.input_fields.len() {\n bail!(\n \"Input values length mismatch: expect {}, got {}\",\n flow.execution_plan.input_fields.len(),\n input_values.len()\n );\n }\n for (field, value) in flow.execution_plan.input_fields.iter().zip(input_values) {\n root_scope_entry.define_field(field, value)?;\n }\n let eval_memory = EvaluationMemory::new(\n chrono::Utc::now(),\n None,\n EvaluationMemoryOptions {\n enable_cache: false,\n evaluation_only: true,\n },\n );\n evaluate_op_scope(\n &flow.execution_plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n &eval_memory,\n )\n .await?;\n let output_value = assemble_value(\n &flow.execution_plan.output_value,\n RefList::Nil.prepend(&root_scope_entry),\n );\n Ok(output_value)\n}\n"], ["/cocoindex/src/py/convert.rs", "use crate::prelude::*;\n\nuse bytes::Bytes;\nuse numpy::{PyArray1, PyArrayDyn, PyArrayMethods};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::exceptions::PyTypeError;\nuse pyo3::types::PyAny;\nuse pyo3::types::{PyList, PyTuple};\nuse 
pyo3::{exceptions::PyException, prelude::*};\nuse pythonize::{depythonize, pythonize};\nuse serde::de::DeserializeOwned;\nuse std::ops::Deref;\n\nuse super::IntoPyResult;\n\n#[derive(Debug)]\npub struct Pythonized(pub T);\n\nimpl<'py, T: DeserializeOwned> FromPyObject<'py> for Pythonized {\n fn extract_bound(obj: &Bound<'py, PyAny>) -> PyResult {\n Ok(Pythonized(depythonize(obj).into_py_result()?))\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for &Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n pythonize(py, &self.0).into_py_result()\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n (&self).into_pyobject(py)\n }\n}\n\nimpl Pythonized {\n pub fn into_inner(self) -> T {\n self.0\n }\n}\n\nimpl Deref for Pythonized {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nfn basic_value_to_py_object<'py>(\n py: Python<'py>,\n v: &value::BasicValue,\n) -> PyResult> {\n let result = match v {\n value::BasicValue::Bytes(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Str(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Bool(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Int64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float32(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Range(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Uuid(uuid_val) => uuid_val.into_bound_py_any(py)?,\n value::BasicValue::Date(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Time(v) => v.into_bound_py_any(py)?,\n value::BasicValue::LocalDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::OffsetDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::TimeDelta(v) => 
v.into_bound_py_any(py)?,\n value::BasicValue::Json(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Vector(v) => handle_vector_to_py(py, v)?,\n value::BasicValue::UnionVariant { tag_id, value } => {\n (*tag_id, basic_value_to_py_object(py, value)?).into_bound_py_any(py)?\n }\n };\n Ok(result)\n}\n\npub fn field_values_to_py_object<'py, 'a>(\n py: Python<'py>,\n values: impl Iterator,\n) -> PyResult> {\n let fields = values\n .map(|v| value_to_py_object(py, v))\n .collect::>>()?;\n Ok(PyTuple::new(py, fields)?.into_any())\n}\n\npub fn value_to_py_object<'py>(py: Python<'py>, v: &value::Value) -> PyResult> {\n let result = match v {\n value::Value::Null => py.None().into_bound(py),\n value::Value::Basic(v) => basic_value_to_py_object(py, v)?,\n value::Value::Struct(v) => field_values_to_py_object(py, v.fields.iter())?,\n value::Value::UTable(v) | value::Value::LTable(v) => {\n let rows = v\n .iter()\n .map(|v| field_values_to_py_object(py, v.0.fields.iter()))\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n value::Value::KTable(v) => {\n let rows = v\n .iter()\n .map(|(k, v)| {\n field_values_to_py_object(\n py,\n std::iter::once(&value::Value::from(k.clone())).chain(v.0.fields.iter()),\n )\n })\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n };\n Ok(result)\n}\n\nfn basic_value_from_py_object<'py>(\n typ: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = match typ {\n schema::BasicValueType::Bytes => {\n value::BasicValue::Bytes(Bytes::from(v.extract::>()?))\n }\n schema::BasicValueType::Str => value::BasicValue::Str(Arc::from(v.extract::()?)),\n schema::BasicValueType::Bool => value::BasicValue::Bool(v.extract::()?),\n schema::BasicValueType::Int64 => value::BasicValue::Int64(v.extract::()?),\n schema::BasicValueType::Float32 => value::BasicValue::Float32(v.extract::()?),\n schema::BasicValueType::Float64 => value::BasicValue::Float64(v.extract::()?),\n schema::BasicValueType::Range => 
value::BasicValue::Range(depythonize(v)?),\n schema::BasicValueType::Uuid => value::BasicValue::Uuid(v.extract::()?),\n schema::BasicValueType::Date => value::BasicValue::Date(v.extract::()?),\n schema::BasicValueType::Time => value::BasicValue::Time(v.extract::()?),\n schema::BasicValueType::LocalDateTime => {\n value::BasicValue::LocalDateTime(v.extract::()?)\n }\n schema::BasicValueType::OffsetDateTime => {\n if v.getattr_opt(\"tzinfo\")?\n .ok_or_else(|| {\n PyErr::new::(format!(\n \"expecting a datetime.datetime value, got {}\",\n v.get_type()\n ))\n })?\n .is_none()\n {\n value::BasicValue::OffsetDateTime(\n v.extract::()?.and_utc().into(),\n )\n } else {\n value::BasicValue::OffsetDateTime(\n v.extract::>()?,\n )\n }\n }\n schema::BasicValueType::TimeDelta => {\n value::BasicValue::TimeDelta(v.extract::()?)\n }\n schema::BasicValueType::Json => {\n value::BasicValue::Json(Arc::from(depythonize::(v)?))\n }\n schema::BasicValueType::Vector(elem) => {\n if let Some(vector) = handle_ndarray_from_py(&elem.element_type, v)? {\n vector\n } else {\n // Fallback to list\n value::BasicValue::Vector(Arc::from(\n v.extract::>>()?\n .into_iter()\n .map(|v| basic_value_from_py_object(&elem.element_type, &v))\n .collect::>>()?,\n ))\n }\n }\n schema::BasicValueType::Union(s) => {\n let mut valid_value = None;\n\n // Try parsing the value\n for (i, typ) in s.types.iter().enumerate() {\n if let Ok(value) = basic_value_from_py_object(typ, v) {\n valid_value = Some(value::BasicValue::UnionVariant {\n tag_id: i,\n value: Box::new(value),\n });\n break;\n }\n }\n\n valid_value.ok_or_else(|| {\n PyErr::new::(format!(\n \"invalid union value: {}, available types: {:?}\",\n v, s.types\n ))\n })?\n }\n };\n Ok(result)\n}\n\n// Helper function to convert PyAny to BasicValue for NDArray\nfn handle_ndarray_from_py<'py>(\n elem_type: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult> {\n macro_rules! 
try_convert {\n ($t:ty, $cast:expr) => {\n if let Ok(array) = v.downcast::>() {\n let data = array.readonly().as_slice()?.to_vec();\n let vec = data.into_iter().map($cast).collect::>();\n return Ok(Some(value::BasicValue::Vector(Arc::from(vec))));\n }\n };\n }\n\n match *elem_type {\n schema::BasicValueType::Float32 => try_convert!(f32, value::BasicValue::Float32),\n schema::BasicValueType::Float64 => try_convert!(f64, value::BasicValue::Float64),\n schema::BasicValueType::Int64 => try_convert!(i64, value::BasicValue::Int64),\n _ => {}\n }\n\n Ok(None)\n}\n\n// Helper function to convert BasicValue::Vector to PyAny\nfn handle_vector_to_py<'py>(\n py: Python<'py>,\n v: &[value::BasicValue],\n) -> PyResult> {\n match v.first() {\n Some(value::BasicValue::Float32(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float32(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float32\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Float64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float64(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Int64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Int64(i) => Ok(*i),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Int64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n _ => Ok(v\n .iter()\n .map(|v| basic_value_to_py_object(py, v))\n .collect::>>()?\n .into_bound_py_any(py)?),\n }\n}\n\nfn field_values_from_py_object<'py>(\n schema: &schema::StructSchema,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let list = v.extract::>>()?;\n if list.len() != schema.fields.len() {\n return Err(PyException::new_err(format!(\n \"struct field number mismatch, expected {}, got {}\",\n 
schema.fields.len(),\n list.len()\n )));\n }\n\n Ok(value::FieldValues {\n fields: schema\n .fields\n .iter()\n .zip(list.into_iter())\n .map(|(f, v)| value_from_py_object(&f.value_type.typ, &v))\n .collect::>>()?,\n })\n}\n\npub fn value_from_py_object<'py>(\n typ: &schema::ValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = if v.is_none() {\n value::Value::Null\n } else {\n match typ {\n schema::ValueType::Basic(typ) => {\n value::Value::Basic(basic_value_from_py_object(typ, v)?)\n }\n schema::ValueType::Struct(schema) => {\n value::Value::Struct(field_values_from_py_object(schema, v)?)\n }\n schema::ValueType::Table(schema) => {\n let list = v.extract::>>()?;\n let values = list\n .into_iter()\n .map(|v| field_values_from_py_object(&schema.row, &v))\n .collect::>>()?;\n\n match schema.kind {\n schema::TableKind::UTable => {\n value::Value::UTable(values.into_iter().map(|v| v.into()).collect())\n }\n schema::TableKind::LTable => {\n value::Value::LTable(values.into_iter().map(|v| v.into()).collect())\n }\n\n schema::TableKind::KTable => value::Value::KTable(\n values\n .into_iter()\n .map(|v| {\n let mut iter = v.fields.into_iter();\n let key = iter.next().unwrap().into_key().into_py_result()?;\n Ok((\n key,\n value::ScopeValue(value::FieldValues {\n fields: iter.collect::>(),\n }),\n ))\n })\n .collect::>>()?,\n ),\n }\n }\n }\n };\n Ok(result)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::base::schema;\n use crate::base::value;\n use crate::base::value::ScopeValue;\n use pyo3::Python;\n use std::collections::BTreeMap;\n use std::sync::Arc;\n\n fn assert_roundtrip_conversion(original_value: &value::Value, value_type: &schema::ValueType) {\n Python::with_gil(|py| {\n // Convert Rust value to Python object using value_to_py_object\n let py_object = value_to_py_object(py, original_value)\n .expect(\"Failed to convert Rust value to Python object\");\n\n println!(\"Python object: {py_object:?}\");\n let roundtripped_value = 
value_from_py_object(value_type, &py_object)\n .expect(\"Failed to convert Python object back to Rust value\");\n\n println!(\"Roundtripped value: {roundtripped_value:?}\");\n assert_eq!(\n original_value, &roundtripped_value,\n \"Value mismatch after roundtrip\"\n );\n });\n }\n\n #[test]\n fn test_roundtrip_basic_values() {\n let values_and_types = vec![\n (\n value::Value::Basic(value::BasicValue::Int64(42)),\n schema::ValueType::Basic(schema::BasicValueType::Int64),\n ),\n (\n value::Value::Basic(value::BasicValue::Float64(3.14)),\n schema::ValueType::Basic(schema::BasicValueType::Float64),\n ),\n (\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"hello\"))),\n schema::ValueType::Basic(schema::BasicValueType::Str),\n ),\n (\n value::Value::Basic(value::BasicValue::Bool(true)),\n schema::ValueType::Basic(schema::BasicValueType::Bool),\n ),\n ];\n\n for (val, typ) in values_and_types {\n assert_roundtrip_conversion(&val, &typ);\n }\n }\n\n #[test]\n fn test_roundtrip_struct() {\n let struct_schema = schema::StructSchema {\n description: Some(Arc::from(\"Test struct description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"a\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"b\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n };\n\n let struct_val_data = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(10)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"world\"))),\n ],\n };\n\n let struct_val = value::Value::Struct(struct_val_data);\n let struct_typ = schema::ValueType::Struct(struct_schema); // No clone needed\n\n assert_roundtrip_conversion(&struct_val, &struct_typ);\n }\n\n #[test]\n fn 
test_roundtrip_table_types() {\n let row_schema_struct = Arc::new(schema::StructSchema {\n description: Some(Arc::from(\"Test table row description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"key_col\".to_string(), // Will be used as key for KTable implicitly\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_1\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_2\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Bool),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n });\n\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(1)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(2)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // UTable\n let utable_schema = schema::TableSchema {\n kind: schema::TableKind::UTable,\n row: (*row_schema_struct).clone(),\n };\n let utable_val = value::Value::UTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let utable_typ = schema::ValueType::Table(utable_schema);\n assert_roundtrip_conversion(&utable_val, &utable_typ);\n\n // LTable\n let ltable_schema = schema::TableSchema {\n kind: schema::TableKind::LTable,\n row: 
(*row_schema_struct).clone(),\n };\n let ltable_val = value::Value::LTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let ltable_typ = schema::ValueType::Table(ltable_schema);\n assert_roundtrip_conversion(<able_val, <able_typ);\n\n // KTable\n let ktable_schema = schema::TableSchema {\n kind: schema::TableKind::KTable,\n row: (*row_schema_struct).clone(),\n };\n let mut ktable_data = BTreeMap::new();\n\n // Create KTable entries where the ScopeValue doesn't include the key field\n // This matches how the Python code will serialize/deserialize\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // For KTable, the key is extracted from the first field of ScopeValue based on current serialization\n let key1 = value::Value::::Basic(value::BasicValue::Int64(1))\n .into_key()\n .unwrap();\n let key2 = value::Value::::Basic(value::BasicValue::Int64(2))\n .into_key()\n .unwrap();\n\n ktable_data.insert(key1, row1_scope_val.clone());\n ktable_data.insert(key2, row2_scope_val.clone());\n\n let ktable_val = value::Value::KTable(ktable_data);\n let ktable_typ = schema::ValueType::Table(ktable_schema);\n assert_roundtrip_conversion(&ktable_val, &ktable_typ);\n }\n}\n"], ["/cocoindex/src/ops/functions/extract_by_llm.rs", "use crate::llm::{\n LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat, new_llm_generation_client,\n};\nuse crate::ops::sdk::*;\nuse crate::prelude::*;\nuse base::json_schema::build_json_schema;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\n#[derive(Debug, 
Clone, Serialize, Deserialize)]\npub struct Spec {\n llm_spec: LlmSpec,\n output_type: EnrichedValueType,\n instruction: Option,\n}\n\npub struct Args {\n text: Option,\n image: Option,\n}\n\nstruct Executor {\n args: Args,\n client: Box,\n model: String,\n output_json_schema: SchemaObject,\n system_prompt: String,\n value_extractor: base::json_schema::ValueExtractor,\n}\n\nfn get_system_prompt(instructions: &Option, extra_instructions: Option) -> String {\n let mut message =\n \"You are a helpful assistant that processes user-provided inputs (text, images, or both) to produce structured outputs. \\\nYour task is to follow the provided instructions to generate or extract information and output valid JSON matching the specified schema. \\\nBase your response solely on the content of the input. \\\nFor generative tasks, respond accurately and relevantly based on what is provided. \\\nUnless explicitly instructed otherwise, output only the JSON. DO NOT include explanations, descriptions, or formatting outside the JSON.\"\n .to_string();\n\n if let Some(custom_instructions) = instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(custom_instructions);\n }\n\n if let Some(extra_instructions) = extra_instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(&extra_instructions);\n }\n\n message\n}\n\nimpl Executor {\n async fn new(spec: Spec, args: Args) -> Result {\n let client = new_llm_generation_client(\n spec.llm_spec.api_type,\n spec.llm_spec.address,\n spec.llm_spec.api_config,\n )\n .await?;\n let schema_output = build_json_schema(spec.output_type, client.json_schema_options())?;\n Ok(Self {\n args,\n client,\n model: spec.llm_spec.model,\n output_json_schema: schema_output.schema,\n system_prompt: get_system_prompt(&spec.instruction, schema_output.extra_instructions),\n value_extractor: schema_output.value_extractor,\n })\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n 
}\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let image_bytes: Option> = self\n .args\n .image\n .as_ref()\n .map(|arg| arg.value(&input)?.as_bytes())\n .transpose()?\n .map(|bytes| Cow::Borrowed(bytes.as_ref()));\n let text = self\n .args\n .text\n .as_ref()\n .map(|arg| arg.value(&input)?.as_str())\n .transpose()?;\n\n if text.is_none() && image_bytes.is_none() {\n api_bail!(\"At least one of `text` or `image` must be provided\");\n }\n\n let user_prompt = text.map_or(\"\", |v| v);\n let req = LlmGenerateRequest {\n model: &self.model,\n system_prompt: Some(Cow::Borrowed(&self.system_prompt)),\n user_prompt: Cow::Borrowed(user_prompt),\n image: image_bytes,\n output_format: Some(OutputFormat::JsonSchema {\n name: Cow::Borrowed(\"ExtractedData\"),\n schema: Cow::Borrowed(&self.output_json_schema),\n }),\n };\n let res = self.client.generate(req).await?;\n let json_value: serde_json::Value = serde_json::from_str(res.text.as_str())?;\n let value = self.value_extractor.extract_value(json_value)?;\n Ok(value)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ExtractByLlm\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_optional_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n image: args_resolver\n .next_optional_arg(\"image\")?\n .expect_type(&ValueType::Basic(BasicValueType::Bytes))?,\n };\n\n if args.text.is_none() && args.image.is_none() {\n api_bail!(\"At least one of 'text' or 'image' must be provided\");\n }\n\n Ok((args, spec.output_type.clone()))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n resolved_input_schema: Args,\n _context: Arc,\n ) -> Result> 
{\n Ok(Box::new(Executor::new(spec, resolved_input_schema).await?))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires an OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_extract_by_llm() {\n // Define the expected output structure\n let target_output_schema = StructSchema {\n fields: Arc::new(vec![\n FieldSchema::new(\n \"extracted_field_name\",\n make_output_type(BasicValueType::Str),\n ),\n FieldSchema::new(\n \"extracted_field_value\",\n make_output_type(BasicValueType::Int64),\n ),\n ]),\n description: Some(\"A test structure for extraction\".into()),\n };\n\n let output_type_spec = EnrichedValueType {\n typ: ValueType::Struct(target_output_schema.clone()),\n nullable: false,\n attrs: Arc::new(BTreeMap::new()),\n };\n\n let spec = Spec {\n llm_spec: LlmSpec {\n api_type: crate::llm::LlmApiType::OpenAi,\n model: \"gpt-4o\".to_string(),\n address: None,\n api_config: None,\n },\n output_type: output_type_spec,\n instruction: Some(\"Extract the name and value from the text. The name is a string, the value is an integer.\".to_string()),\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"The item is called 'CocoIndex Test' and its value is 42.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_extract_by_llm: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Struct(field_values) => {\n assert_eq!(\n field_values.fields.len(),\n target_output_schema.fields.len(),\n \"Mismatched number of fields in output struct\"\n );\n for (idx, field_schema) in target_output_schema.fields.iter().enumerate() {\n match (&field_values.fields[idx], &field_schema.value_type.typ) {\n (\n Value::Basic(BasicValue::Str(_)),\n ValueType::Basic(BasicValueType::Str),\n ) => {}\n (\n Value::Basic(BasicValue::Int64(_)),\n ValueType::Basic(BasicValueType::Int64),\n ) => {}\n (val, expected_type) => panic!(\n \"Field '{}' type mismatch. Got {:?}, expected type compatible with {:?}\",\n field_schema.name,\n val.kind(),\n expected_type\n ),\n }\n }\n }\n _ => panic!(\"Expected Value::Struct, got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/base/spec.rs", "use crate::prelude::*;\n\nuse super::schema::{EnrichedValueType, FieldSchema};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\nuse std::ops::Deref;\n\n/// OutputMode enum for displaying spec info in different granularity\n#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum OutputMode {\n Concise,\n Verbose,\n}\n\n/// Formatting spec per output mode\npub trait SpecFormatter {\n fn format(&self, mode: OutputMode) -> String;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum SpecString {\n /// The value comes from the environment variable.\n Env(String),\n /// The value is defined by the literal string.\n #[serde(untagged)]\n Literal(String),\n}\n\npub type ScopeName = String;\n\n/// Used to identify a data field within a flow.\n/// Within a flow, in each specific scope, each field name must be unique.\n/// - A field is defined by `outputs` of an operation. 
There must be exactly one definition for each field.\n/// - A field can be used as an input for multiple operations.\npub type FieldName = String;\n\npub const ROOT_SCOPE_NAME: &str = \"_root\";\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]\npub struct FieldPath(pub Vec);\n\nimpl Deref for FieldPath {\n type Target = Vec;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl fmt::Display for FieldPath {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.is_empty() {\n write!(f, \"*\")\n } else {\n write!(f, \"{}\", self.join(\".\"))\n }\n }\n}\n\n/// Used to identify an input or output argument for an operator.\n/// Useful to identify different inputs/outputs of the same operation. Usually omitted for operations with the same purpose of input/output.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct OpArgName(pub Option);\n\nimpl fmt::Display for OpArgName {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if let Some(arg_name) = &self.0 {\n write!(f, \"${arg_name}\")\n } else {\n write!(f, \"?\")\n }\n }\n}\n\nimpl OpArgName {\n pub fn is_unnamed(&self) -> bool {\n self.0.is_none()\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct NamedSpec {\n pub name: String,\n\n #[serde(flatten)]\n pub spec: T,\n}\n\nimpl fmt::Display for NamedSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}: {}\", self.name, self.spec)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FieldMapping {\n /// If unspecified, means the current scope.\n /// \"_root\" refers to the top-level scope.\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub scope: Option,\n\n pub field_path: FieldPath,\n}\n\nimpl fmt::Display for FieldMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let scope = self.scope.as_deref().unwrap_or(\"\");\n write!(\n f,\n 
\"{}{}\",\n if scope.is_empty() {\n \"\".to_string()\n } else {\n format!(\"{scope}.\")\n },\n self.field_path\n )\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ConstantMapping {\n pub schema: EnrichedValueType,\n pub value: serde_json::Value,\n}\n\nimpl fmt::Display for ConstantMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let value = serde_json::to_string(&self.value).unwrap_or(\"#serde_error\".to_string());\n write!(f, \"{value}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectionMapping {\n pub field: FieldMapping,\n pub scope_name: ScopeName,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StructMapping {\n pub fields: Vec>,\n}\n\nimpl fmt::Display for StructMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let fields = self\n .fields\n .iter()\n .map(|field| field.name.clone())\n .collect::>()\n .join(\",\");\n write!(f, \"{fields}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum ValueMapping {\n Constant(ConstantMapping),\n Field(FieldMapping),\n Struct(StructMapping),\n // TODO: Add support for collections\n}\n\nimpl ValueMapping {\n pub fn is_entire_scope(&self) -> bool {\n match self {\n ValueMapping::Field(FieldMapping {\n scope: None,\n field_path,\n }) => field_path.is_empty(),\n _ => false,\n }\n }\n}\n\nimpl std::fmt::Display for ValueMapping {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n match self {\n ValueMapping::Constant(v) => write!(\n f,\n \"{}\",\n serde_json::to_string(&v.value)\n .unwrap_or_else(|_| \"#(invalid json value)\".to_string())\n ),\n ValueMapping::Field(v) => {\n write!(f, \"{}.{}\", v.scope.as_deref().unwrap_or(\"\"), v.field_path)\n }\n ValueMapping::Struct(v) => write!(\n f,\n \"Struct({})\",\n v.fields\n .iter()\n .map(|f| format!(\"{}={}\", f.name, f.spec))\n .collect::>()\n .join(\", \")\n ),\n }\n }\n}\n\n#[derive(Debug, Clone, 
Serialize, Deserialize)]\npub struct OpArgBinding {\n #[serde(default, skip_serializing_if = \"OpArgName::is_unnamed\")]\n pub arg_name: OpArgName,\n\n #[serde(flatten)]\n pub value: ValueMapping,\n}\n\nimpl fmt::Display for OpArgBinding {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.arg_name.is_unnamed() {\n write!(f, \"{}\", self.value)\n } else {\n write!(f, \"{}={}\", self.arg_name, self.value)\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpSpec {\n pub kind: String,\n #[serde(flatten, default)]\n pub spec: serde_json::Map,\n}\n\nimpl SpecFormatter for OpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.kind.clone(),\n OutputMode::Verbose => {\n let spec_str = serde_json::to_string_pretty(&self.spec)\n .map(|s| {\n let lines: Vec<&str> = s.lines().collect();\n if lines.len() < s.lines().count() {\n lines\n .into_iter()\n .chain([\"...\"])\n .collect::>()\n .join(\"\\n \")\n } else {\n lines.join(\"\\n \")\n }\n })\n .unwrap_or(\"#serde_error\".to_string());\n format!(\"{}({})\", self.kind, spec_str)\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ExecutionOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_rows: Option,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_bytes: Option,\n}\n\nimpl ExecutionOptions {\n pub fn get_concur_control_options(&self) -> concur_control::Options {\n concur_control::Options {\n max_inflight_rows: self.max_inflight_rows,\n max_inflight_bytes: self.max_inflight_bytes,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct SourceRefreshOptions {\n pub refresh_interval: Option,\n}\n\nimpl fmt::Display for SourceRefreshOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let refresh = self\n .refresh_interval\n .map(|d| format!(\"{d:?}\"))\n 
.unwrap_or(\"none\".to_string());\n write!(f, \"{refresh}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ImportOpSpec {\n pub source: OpSpec,\n\n #[serde(default)]\n pub refresh_options: SourceRefreshOptions,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl SpecFormatter for ImportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let source = self.source.format(mode);\n format!(\"source={}, refresh={}\", source, self.refresh_options)\n }\n}\n\nimpl fmt::Display for ImportOpSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.format(OutputMode::Concise))\n }\n}\n\n/// Transform data using a given operator.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransformOpSpec {\n pub inputs: Vec,\n pub op: OpSpec,\n}\n\nimpl SpecFormatter for TransformOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let inputs = self\n .inputs\n .iter()\n .map(ToString::to_string)\n .collect::>()\n .join(\",\");\n let op_str = self.op.format(mode);\n match mode {\n OutputMode::Concise => format!(\"op={op_str}, inputs={inputs}\"),\n OutputMode::Verbose => format!(\"op={op_str}, inputs=[{inputs}]\"),\n }\n }\n}\n\n/// Apply reactive operations to each row of the input field.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ForEachOpSpec {\n /// Mapping that provides a table to apply reactive operations to.\n pub field_path: FieldPath,\n pub op_scope: ReactiveOpScope,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl ForEachOpSpec {\n pub fn get_label(&self) -> String {\n format!(\"Loop over {}\", self.field_path)\n }\n}\n\nimpl SpecFormatter for ForEachOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.get_label(),\n OutputMode::Verbose => format!(\"field={}\", self.field_path),\n }\n }\n}\n\n/// Emit data to a given collector at the given scope.\n#[derive(Debug, Clone, 
Serialize, Deserialize)]\npub struct CollectOpSpec {\n /// Field values to be collected.\n pub input: StructMapping,\n /// Scope for the collector.\n pub scope_name: ScopeName,\n /// Name of the collector.\n pub collector_name: FieldName,\n /// If specified, the collector will have an automatically generated UUID field with the given name.\n /// The uuid will remain stable when collected input values remain unchanged.\n pub auto_uuid_field: Option,\n}\n\nimpl SpecFormatter for CollectOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let uuid = self.auto_uuid_field.as_deref().unwrap_or(\"none\");\n match mode {\n OutputMode::Concise => {\n format!(\n \"collector={}, input={}, uuid={}\",\n self.collector_name, self.input, uuid\n )\n }\n OutputMode::Verbose => {\n format!(\n \"scope={}, collector={}, input=[{}], uuid={}\",\n self.scope_name, self.collector_name, self.input, uuid\n )\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum VectorSimilarityMetric {\n CosineSimilarity,\n L2Distance,\n InnerProduct,\n}\n\nimpl fmt::Display for VectorSimilarityMetric {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n match self {\n VectorSimilarityMetric::CosineSimilarity => write!(f, \"Cosine\"),\n VectorSimilarityMetric::L2Distance => write!(f, \"L2\"),\n VectorSimilarityMetric::InnerProduct => write!(f, \"InnerProduct\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct VectorIndexDef {\n pub field_name: FieldName,\n pub metric: VectorSimilarityMetric,\n}\n\nimpl fmt::Display for VectorIndexDef {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}:{}\", self.field_name, self.metric)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct IndexOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub primary_key_fields: Option>,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n pub 
vector_indexes: Vec,\n}\n\nimpl IndexOptions {\n pub fn primary_key_fields(&self) -> Result<&[FieldName]> {\n Ok(self\n .primary_key_fields\n .as_ref()\n .ok_or(api_error!(\"Primary key fields are not set\"))?\n .as_ref())\n }\n}\n\nimpl fmt::Display for IndexOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let primary_keys = self\n .primary_key_fields\n .as_ref()\n .map(|p| p.join(\",\"))\n .unwrap_or_default();\n let vector_indexes = self\n .vector_indexes\n .iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\",\");\n write!(f, \"keys={primary_keys}, indexes={vector_indexes}\")\n }\n}\n\n/// Store data to a given sink.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ExportOpSpec {\n pub collector_name: FieldName,\n pub target: OpSpec,\n pub index_options: IndexOptions,\n pub setup_by_user: bool,\n}\n\nimpl SpecFormatter for ExportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let target_str = self.target.format(mode);\n let base = format!(\n \"collector={}, target={}, {}\",\n self.collector_name, target_str, self.index_options\n );\n match mode {\n OutputMode::Concise => base,\n OutputMode::Verbose => format!(\"{}, setup_by_user={}\", base, self.setup_by_user),\n }\n }\n}\n\n/// A reactive operation reacts on given input values.\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"action\")]\npub enum ReactiveOpSpec {\n Transform(TransformOpSpec),\n ForEach(ForEachOpSpec),\n Collect(CollectOpSpec),\n}\n\nimpl SpecFormatter for ReactiveOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match self {\n ReactiveOpSpec::Transform(t) => format!(\"Transform: {}\", t.format(mode)),\n ReactiveOpSpec::ForEach(fe) => match mode {\n OutputMode::Concise => fe.get_label().to_string(),\n OutputMode::Verbose => format!(\"ForEach: {}\", fe.format(mode)),\n },\n ReactiveOpSpec::Collect(c) => format!(\"Collect: {}\", c.format(mode)),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub 
struct ReactiveOpScope {\n pub name: ScopeName,\n pub ops: Vec>,\n // TODO: Suport collectors\n}\n\nimpl fmt::Display for ReactiveOpScope {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"Scope: name={}\", self.name)\n }\n}\n\n/// A flow defines the rule to sync data from given sources to given sinks with given transformations.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowInstanceSpec {\n /// Name of the flow instance.\n pub name: String,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub import_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub reactive_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub export_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub declarations: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransientFlowSpec {\n pub name: String,\n pub input_fields: Vec,\n pub reactive_ops: Vec>,\n pub output_value: ValueMapping,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SimpleSemanticsQueryHandlerSpec {\n pub name: String,\n pub flow_instance_name: String,\n pub export_target_name: String,\n pub query_transform_flow: TransientFlowSpec,\n pub default_similarity_metric: VectorSimilarityMetric,\n}\n\npub struct AuthEntryReference {\n pub key: String,\n _phantom: std::marker::PhantomData,\n}\n\nimpl fmt::Debug for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl fmt::Display for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl Clone for AuthEntryReference {\n fn clone(&self) -> Self {\n Self {\n key: self.key.clone(),\n _phantom: std::marker::PhantomData,\n }\n }\n}\n\n#[derive(Serialize, 
Deserialize)]\nstruct UntypedAuthEntryReference {\n key: T,\n}\n\nimpl Serialize for AuthEntryReference {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n UntypedAuthEntryReference { key: &self.key }.serialize(serializer)\n }\n}\n\nimpl<'de, T> Deserialize<'de> for AuthEntryReference {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let untyped_ref = UntypedAuthEntryReference::::deserialize(deserializer)?;\n Ok(AuthEntryReference {\n key: untyped_ref.key,\n _phantom: std::marker::PhantomData,\n })\n }\n}\n\nimpl PartialEq for AuthEntryReference {\n fn eq(&self, other: &Self) -> bool {\n self.key == other.key\n }\n}\n\nimpl Eq for AuthEntryReference {}\n\nimpl std::hash::Hash for AuthEntryReference {\n fn hash(&self, state: &mut H) {\n self.key.hash(state);\n }\n}\n"], ["/cocoindex/src/base/schema.rs", "use crate::prelude::*;\n\nuse super::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct VectorTypeSchema {\n pub element_type: Box,\n pub dimension: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct UnionTypeSchema {\n pub types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum BasicValueType {\n /// A sequence of bytes in binary.\n Bytes,\n\n /// String encoded in UTF-8.\n Str,\n\n /// A boolean value.\n Bool,\n\n /// 64-bit integer.\n Int64,\n\n /// 32-bit floating point number.\n Float32,\n\n /// 64-bit floating point number.\n Float64,\n\n /// A range, with a start offset and a length.\n Range,\n\n /// A UUID.\n Uuid,\n\n /// Date (without time within the current day).\n Date,\n\n /// Time of the day.\n Time,\n\n /// Local date and time, without timezone.\n LocalDateTime,\n\n /// Date and time with timezone.\n OffsetDateTime,\n\n /// A time duration.\n TimeDelta,\n\n /// A JSON value.\n Json,\n\n /// A 
vector of values (usually numbers, for embeddings).\n Vector(VectorTypeSchema),\n\n /// A union\n Union(UnionTypeSchema),\n}\n\nimpl std::fmt::Display for BasicValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n BasicValueType::Bytes => write!(f, \"Bytes\"),\n BasicValueType::Str => write!(f, \"Str\"),\n BasicValueType::Bool => write!(f, \"Bool\"),\n BasicValueType::Int64 => write!(f, \"Int64\"),\n BasicValueType::Float32 => write!(f, \"Float32\"),\n BasicValueType::Float64 => write!(f, \"Float64\"),\n BasicValueType::Range => write!(f, \"Range\"),\n BasicValueType::Uuid => write!(f, \"Uuid\"),\n BasicValueType::Date => write!(f, \"Date\"),\n BasicValueType::Time => write!(f, \"Time\"),\n BasicValueType::LocalDateTime => write!(f, \"LocalDateTime\"),\n BasicValueType::OffsetDateTime => write!(f, \"OffsetDateTime\"),\n BasicValueType::TimeDelta => write!(f, \"TimeDelta\"),\n BasicValueType::Json => write!(f, \"Json\"),\n BasicValueType::Vector(s) => {\n write!(f, \"Vector[{}\", s.element_type)?;\n if let Some(dimension) = s.dimension {\n write!(f, \", {dimension}\")?;\n }\n write!(f, \"]\")\n }\n BasicValueType::Union(s) => {\n write!(f, \"Union[\")?;\n for (i, typ) in s.types.iter().enumerate() {\n if i > 0 {\n // Add type delimiter\n write!(f, \" | \")?;\n }\n write!(f, \"{typ}\")?;\n }\n write!(f, \"]\")\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct StructSchema {\n pub fields: Arc>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n}\n\nimpl StructSchema {\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: Arc::new(self.fields.iter().map(|f| f.without_attrs()).collect()),\n description: None,\n }\n }\n}\n\nimpl std::fmt::Display for StructSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Struct(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 
{\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\n#[allow(clippy::enum_variant_names)]\npub enum TableKind {\n /// An table with unordered rows, without key.\n UTable,\n /// A table's first field is the key.\n #[serde(alias = \"Table\")]\n KTable,\n /// A table whose rows orders are preserved.\n #[serde(alias = \"List\")]\n LTable,\n}\n\nimpl std::fmt::Display for TableKind {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n TableKind::UTable => write!(f, \"Table\"),\n TableKind::KTable => write!(f, \"KTable\"),\n TableKind::LTable => write!(f, \"LTable\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct TableSchema {\n pub kind: TableKind,\n pub row: StructSchema,\n}\n\nimpl TableSchema {\n pub fn has_key(&self) -> bool {\n match self.kind {\n TableKind::KTable => true,\n TableKind::UTable | TableKind::LTable => false,\n }\n }\n\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self.kind {\n TableKind::KTable => self\n .row\n .fields\n .first()\n .as_ref()\n .map(|field| &field.value_type),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n kind: self.kind,\n row: self.row.without_attrs(),\n }\n }\n}\n\nimpl std::fmt::Display for TableSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}({})\", self.kind, self.row)\n }\n}\n\nimpl TableSchema {\n pub fn new(kind: TableKind, row: StructSchema) -> Self {\n Self { kind, row }\n }\n\n pub fn key_field(&self) -> Option<&FieldSchema> {\n match self.kind {\n TableKind::KTable => Some(self.row.fields.first().unwrap()),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum ValueType {\n Struct(StructSchema),\n\n 
#[serde(untagged)]\n Basic(BasicValueType),\n\n #[serde(untagged)]\n Table(TableSchema),\n}\n\nimpl ValueType {\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self {\n ValueType::Basic(_) => None,\n ValueType::Struct(_) => None,\n ValueType::Table(c) => c.key_type(),\n }\n }\n\n // Type equality, ignoring attributes.\n pub fn without_attrs(&self) -> Self {\n match self {\n ValueType::Basic(a) => ValueType::Basic(a.clone()),\n ValueType::Struct(a) => ValueType::Struct(a.without_attrs()),\n ValueType::Table(a) => ValueType::Table(a.without_attrs()),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct EnrichedValueType {\n #[serde(rename = \"type\")]\n pub typ: DataType,\n\n #[serde(default, skip_serializing_if = \"std::ops::Not::not\")]\n pub nullable: bool,\n\n #[serde(default, skip_serializing_if = \"BTreeMap::is_empty\")]\n pub attrs: Arc>,\n}\n\nimpl EnrichedValueType {\n pub fn without_attrs(&self) -> Self {\n Self {\n typ: self.typ.without_attrs(),\n nullable: self.nullable,\n attrs: Default::default(),\n }\n }\n}\n\nimpl EnrichedValueType {\n pub fn from_alternative(\n value_type: &EnrichedValueType,\n ) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n typ: (&value_type.typ).try_into()?,\n nullable: value_type.nullable,\n attrs: value_type.attrs.clone(),\n })\n }\n\n pub fn with_attr(mut self, key: &str, value: serde_json::Value) -> Self {\n Arc::make_mut(&mut self.attrs).insert(key.to_string(), value);\n self\n }\n}\n\nimpl std::fmt::Display for EnrichedValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.typ)?;\n if self.nullable {\n write!(f, \"?\")?;\n }\n if !self.attrs.is_empty() {\n write!(\n f,\n \" [{}]\",\n self.attrs\n .iter()\n .map(|(k, v)| format!(\"{k}: {v}\"))\n .collect::>()\n .join(\", \")\n )?;\n }\n Ok(())\n }\n}\n\nimpl std::fmt::Display for ValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result {\n match self {\n ValueType::Basic(b) => write!(f, \"{b}\"),\n ValueType::Struct(s) => write!(f, \"{s}\"),\n ValueType::Table(c) => write!(f, \"{c}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct FieldSchema {\n /// ID is used to identify the field in the schema.\n pub name: FieldName,\n\n #[serde(flatten)]\n pub value_type: EnrichedValueType,\n}\n\nimpl FieldSchema {\n pub fn new(name: impl ToString, value_type: EnrichedValueType) -> Self {\n Self {\n name: name.to_string(),\n value_type,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n name: self.name.clone(),\n value_type: self.value_type.without_attrs(),\n }\n }\n}\n\nimpl FieldSchema {\n pub fn from_alternative(field: &FieldSchema) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n name: field.name.clone(),\n value_type: EnrichedValueType::from_alternative(&field.value_type)?,\n })\n }\n}\n\nimpl std::fmt::Display for FieldSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.name, self.value_type)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct CollectorSchema {\n pub fields: Vec,\n /// If specified, the collector will have an automatically generated UUID field with the given index.\n pub auto_uuid_field_idx: Option,\n}\n\nimpl std::fmt::Display for CollectorSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Collector(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\nimpl CollectorSchema {\n pub fn from_fields(fields: Vec, auto_uuid_field: Option) -> Self {\n let mut fields = fields;\n let auto_uuid_field_idx = if let Some(auto_uuid_field) = auto_uuid_field {\n fields.insert(\n 0,\n FieldSchema::new(\n auto_uuid_field,\n EnrichedValueType {\n typ: 
ValueType::Basic(BasicValueType::Uuid),\n nullable: false,\n attrs: Default::default(),\n },\n ),\n );\n Some(0)\n } else {\n None\n };\n Self {\n fields,\n auto_uuid_field_idx,\n }\n }\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: self.fields.iter().map(|f| f.without_attrs()).collect(),\n auto_uuid_field_idx: self.auto_uuid_field_idx,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct OpScopeSchema {\n /// Output schema for ops with output.\n pub op_output_types: HashMap,\n\n /// Child op scope for foreach ops.\n pub op_scopes: HashMap>,\n\n /// Collectors for the current scope.\n pub collectors: Vec>>,\n}\n\n/// Top-level schema for a flow instance.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowSchema {\n pub schema: StructSchema,\n\n pub root_op_scope: OpScopeSchema,\n}\n\nimpl std::ops::Deref for FlowSchema {\n type Target = StructSchema;\n\n fn deref(&self) -> &Self::Target {\n &self.schema\n }\n}\n\npub struct OpArgSchema {\n pub name: OpArgName,\n pub value_type: EnrichedValueType,\n pub analyzed_value: AnalyzedValueMapping,\n}\n"], ["/cocoindex/src/service/flows.rs", "use crate::prelude::*;\n\nuse crate::execution::{evaluator, indexing_status, memoization, row_indexer, stats};\nuse crate::lib_context::LibContext;\nuse crate::{base::schema::FlowSchema, ops::interface::SourceExecutorListOptions};\nuse axum::{\n Json,\n extract::{Path, State},\n http::StatusCode,\n};\nuse axum_extra::extract::Query;\n\npub async fn list_flows(\n State(lib_context): State>,\n) -> Result>, ApiError> {\n Ok(Json(\n lib_context.flows.lock().unwrap().keys().cloned().collect(),\n ))\n}\n\npub async fn get_flow_schema(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(flow_ctx.flow.data_schema.clone()))\n}\n\n#[derive(Serialize)]\npub struct GetFlowResponse {\n flow_spec: spec::FlowInstanceSpec,\n data_schema: 
FlowSchema,\n fingerprint: utils::fingerprint::Fingerprint,\n}\n\npub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}\n\n#[derive(Deserialize)]\npub struct GetKeysParam {\n field: String,\n}\n\n#[derive(Serialize)]\npub struct GetKeysResponse {\n key_type: schema::EnrichedValueType,\n keys: Vec,\n}\n\npub async fn get_keys(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let schema = &flow_ctx.flow.data_schema;\n\n let field_idx = schema\n .fields\n .iter()\n .position(|f| f.name == query.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field not found: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let key_type = schema.fields[field_idx]\n .value_type\n .typ\n .key_type()\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field has no key: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let execution_plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = execution_plan\n .import_ops\n .iter()\n .find(|op| op.output.field_idx == field_idx as u32)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field is not a source: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n let mut keys = Vec::new();\n while let Some(rows) = 
rows_stream.next().await {\n keys.extend(rows?.into_iter().map(|row| row.key));\n }\n Ok(Json(GetKeysResponse {\n key_type: key_type.clone(),\n keys,\n }))\n}\n\n#[derive(Deserialize)]\npub struct SourceRowKeyParams {\n field: String,\n key: Vec,\n}\n\n#[derive(Serialize)]\npub struct EvaluateDataResponse {\n schema: FlowSchema,\n data: value::ScopeValue,\n}\n\nstruct SourceRowKeyContextHolder<'a> {\n plan: Arc,\n import_op_idx: usize,\n schema: &'a FlowSchema,\n key: value::KeyValue,\n}\n\nimpl<'a> SourceRowKeyContextHolder<'a> {\n async fn create(flow_ctx: &'a FlowContext, source_row_key: SourceRowKeyParams) -> Result {\n let schema = &flow_ctx.flow.data_schema;\n let import_op_idx = flow_ctx\n .flow\n .flow_instance\n .import_ops\n .iter()\n .position(|op| op.name == source_row_key.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"source field not found: {}\", source_row_key.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[import_op_idx];\n let field_schema = &schema.fields[import_op.output.field_idx as usize];\n let table_schema = match &field_schema.value_type.typ {\n schema::ValueType::Table(table) => table,\n _ => api_bail!(\"field is not a table: {}\", source_row_key.field),\n };\n let key_field = table_schema\n .key_field()\n .ok_or_else(|| api_error!(\"field {} does not have a key\", source_row_key.field))?;\n let key = value::KeyValue::from_strs(source_row_key.key, &key_field.value_type.typ)?;\n Ok(Self {\n plan,\n import_op_idx,\n schema,\n key,\n })\n }\n\n fn as_context<'b>(&'b self) -> evaluator::SourceRowEvaluationContext<'b> {\n evaluator::SourceRowEvaluationContext {\n plan: &self.plan,\n import_op: &self.plan.import_ops[self.import_op_idx],\n schema: self.schema,\n key: &self.key,\n import_op_idx: self.import_op_idx,\n }\n }\n}\n\npub async fn evaluate_data(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, 
ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let evaluate_output = row_indexer::evaluate_source_entry_with_memory(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n memoization::EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: true,\n },\n lib_context.require_builtin_db_pool()?,\n )\n .await?\n .ok_or_else(|| {\n api_error!(\n \"value not found for source at the specified key: {key:?}\",\n key = source_row_key_ctx.key\n )\n })?;\n\n Ok(Json(EvaluateDataResponse {\n schema: flow_ctx.flow.data_schema.clone(),\n data: evaluate_output.data_scope.into(),\n }))\n}\n\npub async fn update(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow_ctx.clone(),\n lib_context.require_builtin_db_pool()?,\n execution::FlowLiveUpdaterOptions {\n live_mode: false,\n ..Default::default()\n },\n )\n .await?;\n live_updater.wait().await?;\n Ok(Json(live_updater.index_update_info()))\n}\n\npub async fn get_row_indexing_status(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let indexing_status = indexing_status::get_source_row_indexing_status(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n lib_context.require_builtin_db_pool()?,\n )\n .await?;\n Ok(Json(indexing_status))\n}\n"], ["/cocoindex/src/ops/sources/azure_blob.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse azure_core::prelude::NextMarker;\nuse 
azure_identity::{DefaultAzureCredential, TokenCredentialOptions};\nuse azure_storage::StorageCredentials;\nuse azure_storage_blobs::prelude::*;\nuse futures::StreamExt;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n account_name: String,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n\n /// SAS token for authentication. Takes precedence over account_access_key.\n sas_token: Option>,\n /// Account access key for authentication. If not provided, will use default Azure credential.\n account_access_key: Option>,\n}\n\nstruct Executor {\n client: BlobServiceClient,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &time::OffsetDateTime) -> Ordinal {\n Ordinal(Some(dt.unix_timestamp_nanos() as i64 / 1000))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! 
{\n let mut continuation_token: Option = None;\n loop {\n let mut list_builder = self.client\n .container_client(&self.container_name)\n .list_blobs();\n\n if let Some(p) = &self.prefix {\n list_builder = list_builder.prefix(p.clone());\n }\n\n if let Some(token) = continuation_token.take() {\n list_builder = list_builder.marker(token);\n }\n\n let mut page_stream = list_builder.into_stream();\n let Some(page_result) = page_stream.next().await else {\n break;\n };\n\n let page = page_result?;\n let mut batch = Vec::new();\n\n for blob in page.blobs.blobs() {\n let key = &blob.name;\n\n // Only include files (not directories)\n if key.ends_with('/') { continue; }\n\n if self.is_file_included(key) {\n let ordinal = Some(datetime_to_ordinal(&blob.properties.last_modified));\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.clone().into()),\n ordinal,\n });\n }\n }\n\n if !batch.is_empty() {\n yield batch;\n }\n\n continuation_token = page.next_marker;\n if continuation_token.is_none() {\n break;\n }\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n\n let blob_client = self\n .client\n .container_client(&self.container_name)\n .blob_client(key_str.as_ref());\n\n let mut stream = blob_client.get().into_stream();\n let result = stream.next().await;\n\n let blob_response = match result {\n Some(response) => response?,\n None => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n\n let ordinal = if options.include_ordinal {\n Some(datetime_to_ordinal(\n &blob_response.blob.properties.last_modified,\n ))\n } else {\n None\n };\n\n let value = if options.include_value {\n let bytes = 
blob_response.data.collect().await?;\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes)\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n // Azure Blob Storage doesn't have built-in change notifications like S3+SQS\n Ok(None)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AzureBlob\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n context: Arc,\n ) -> Result> {\n let credential = if let Some(sas_token) = spec.sas_token {\n let sas_token = context.auth_registry.get(&sas_token)?;\n StorageCredentials::sas_token(sas_token)?\n } else if let Some(account_access_key) = spec.account_access_key {\n let account_access_key = context.auth_registry.get(&account_access_key)?;\n StorageCredentials::access_key(spec.account_name.clone(), account_access_key)\n } else {\n let default_credential = Arc::new(DefaultAzureCredential::create(\n TokenCredentialOptions::default(),\n )?);\n StorageCredentials::token_credential(default_credential)\n };\n\n let client = 
BlobServiceClient::new(&spec.account_name, credential);\n Ok(Box::new(Executor {\n client,\n container_name: spec.container_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/execution/memoization.rs", "use anyhow::{Result, bail};\nuse serde::{Deserialize, Serialize};\nuse std::{\n borrow::Cow,\n collections::HashMap,\n future::Future,\n sync::{Arc, Mutex},\n};\n\nuse crate::{\n base::{schema, value},\n service::error::{SharedError, SharedResultExtRef},\n utils::fingerprint::{Fingerprint, Fingerprinter},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StoredCacheEntry {\n time_sec: i64,\n value: serde_json::Value,\n}\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct StoredMemoizationInfo {\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub cache: HashMap,\n\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub uuids: HashMap>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub content_hash: Option,\n}\n\npub type CacheEntryCell = Arc>>;\nenum CacheData {\n /// Existing entry in previous runs, but not in current run yet.\n Previous(serde_json::Value),\n /// Value appeared in current run.\n Current(CacheEntryCell),\n}\n\nstruct CacheEntry {\n time: chrono::DateTime,\n data: CacheData,\n}\n\n#[derive(Default)]\nstruct UuidEntry {\n uuids: Vec,\n num_current: usize,\n}\n\nimpl UuidEntry {\n fn new(uuids: Vec) -> Self {\n Self {\n uuids,\n num_current: 0,\n }\n }\n\n fn into_stored(self) -> Option> {\n if self.num_current == 0 {\n return None;\n }\n let mut uuids = self.uuids;\n if 
self.num_current < uuids.len() {\n uuids.truncate(self.num_current);\n }\n Some(uuids)\n }\n}\n\npub struct EvaluationMemoryOptions {\n pub enable_cache: bool,\n\n /// If true, it's for evaluation only.\n /// In this mode, we don't memoize anything.\n pub evaluation_only: bool,\n}\n\npub struct EvaluationMemory {\n current_time: chrono::DateTime,\n cache: Option>>,\n uuids: Mutex>,\n evaluation_only: bool,\n}\n\nimpl EvaluationMemory {\n pub fn new(\n current_time: chrono::DateTime,\n stored_info: Option,\n options: EvaluationMemoryOptions,\n ) -> Self {\n let (stored_cache, stored_uuids) = stored_info\n .map(|stored_info| (stored_info.cache, stored_info.uuids))\n .unzip();\n Self {\n current_time,\n cache: options.enable_cache.then(|| {\n Mutex::new(\n stored_cache\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, e)| {\n (\n k,\n CacheEntry {\n time: chrono::DateTime::from_timestamp(e.time_sec, 0)\n .unwrap_or(chrono::DateTime::::MIN_UTC),\n data: CacheData::Previous(e.value),\n },\n )\n })\n .collect(),\n )\n }),\n uuids: Mutex::new(\n (!options.evaluation_only)\n .then_some(stored_uuids)\n .flatten()\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, v)| (k, UuidEntry::new(v)))\n .collect(),\n ),\n evaluation_only: options.evaluation_only,\n }\n }\n\n pub fn into_stored(self) -> Result {\n if self.evaluation_only {\n bail!(\"For evaluation only, cannot convert to stored MemoizationInfo\");\n }\n let cache = if let Some(cache) = self.cache {\n cache\n .into_inner()?\n .into_iter()\n .filter_map(|(k, e)| match e.data {\n CacheData::Previous(_) => None,\n CacheData::Current(entry) => match entry.get() {\n Some(Ok(v)) => Some(serde_json::to_value(v).map(|value| {\n (\n k,\n StoredCacheEntry {\n time_sec: e.time.timestamp(),\n value,\n },\n )\n })),\n _ => None,\n },\n })\n .collect::>()?\n } else {\n bail!(\"Cache is disabled, cannot convert to stored MemoizationInfo\");\n };\n let uuids = self\n .uuids\n .into_inner()?\n .into_iter()\n 
.filter_map(|(k, v)| v.into_stored().map(|uuids| (k, uuids)))\n .collect();\n Ok(StoredMemoizationInfo {\n cache,\n uuids,\n content_hash: None,\n })\n }\n\n pub fn get_cache_entry(\n &self,\n key: impl FnOnce() -> Result,\n typ: &schema::ValueType,\n ttl: Option,\n ) -> Result> {\n let mut cache = if let Some(cache) = &self.cache {\n cache.lock().unwrap()\n } else {\n return Ok(None);\n };\n let result = match cache.entry(key()?) {\n std::collections::hash_map::Entry::Occupied(mut entry)\n if !ttl\n .map(|ttl| entry.get().time + ttl < self.current_time)\n .unwrap_or(false) =>\n {\n let entry_mut = &mut entry.get_mut();\n match &mut entry_mut.data {\n CacheData::Previous(value) => {\n let value = value::Value::from_json(std::mem::take(value), typ)?;\n let cell = Arc::new(tokio::sync::OnceCell::from(Ok(value)));\n let time = entry_mut.time;\n entry.insert(CacheEntry {\n time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n CacheData::Current(cell) => cell.clone(),\n }\n }\n entry => {\n let cell = Arc::new(tokio::sync::OnceCell::new());\n entry.insert_entry(CacheEntry {\n time: self.current_time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n };\n Ok(Some(result))\n }\n\n pub fn next_uuid(&self, key: Fingerprint) -> Result {\n let mut uuids = self.uuids.lock().unwrap();\n\n let entry = uuids.entry(key).or_default();\n let uuid = if self.evaluation_only {\n let fp = Fingerprinter::default()\n .with(&key)?\n .with(&entry.num_current)?\n .into_fingerprint();\n uuid::Uuid::new_v8(fp.0)\n } else if entry.num_current < entry.uuids.len() {\n entry.uuids[entry.num_current]\n } else {\n let uuid = uuid::Uuid::new_v4();\n entry.uuids.push(uuid);\n uuid\n };\n entry.num_current += 1;\n Ok(uuid)\n }\n}\n\npub async fn evaluate_with_cell(\n cell: Option<&CacheEntryCell>,\n compute: impl FnOnce() -> Fut,\n) -> Result>\nwhere\n Fut: Future>,\n{\n let result = match cell {\n Some(cell) => Cow::Borrowed(\n cell.get_or_init(|| {\n let fut = compute();\n 
async move { fut.await.map_err(SharedError::new) }\n })\n .await\n .std_result()?,\n ),\n None => Cow::Owned(compute().await?),\n };\n Ok(result)\n}\n"], ["/cocoindex/src/execution/indexing_status.rs", "use crate::prelude::*;\n\nuse super::db_tracking;\nuse super::evaluator;\nuse futures::try_join;\n\n#[derive(Debug, Serialize)]\npub struct SourceRowLastProcessedInfo {\n pub source_ordinal: interface::Ordinal,\n pub processing_time: Option>,\n pub is_logic_current: bool,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowInfo {\n pub ordinal: interface::Ordinal,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowIndexingStatus {\n pub last_processed: Option,\n pub current: Option,\n}\n\npub async fn get_source_row_indexing_status(\n src_eval_ctx: &evaluator::SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n pool: &sqlx::PgPool,\n) -> Result {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let last_processed_fut = db_tracking::read_source_last_processed_info(\n setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n let current_fut = src_eval_ctx.import_op.executor.get_value(\n src_eval_ctx.key,\n &interface::SourceExecutorGetOptions {\n include_value: false,\n include_ordinal: true,\n },\n );\n let (last_processed, current) = try_join!(last_processed_fut, current_fut)?;\n\n let last_processed = last_processed.map(|l| SourceRowLastProcessedInfo {\n source_ordinal: interface::Ordinal(l.processed_source_ordinal),\n processing_time: l\n .process_time_micros\n .and_then(chrono::DateTime::::from_timestamp_micros),\n is_logic_current: Some(src_eval_ctx.plan.logic_fingerprint.0.as_slice())\n == l.process_logic_fingerprint.as_deref(),\n });\n let current = SourceRowInfo {\n ordinal: current\n .ordinal\n .ok_or(anyhow::anyhow!(\"Ordinal is unavailable for the source\"))?,\n };\n Ok(SourceRowIndexingStatus 
{\n last_processed,\n current: Some(current),\n })\n}\n"], ["/cocoindex/src/llm/gemini.rs", "use crate::prelude::*;\n\nuse crate::llm::{\n LlmEmbeddingClient, LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse base64::prelude::*;\nuse google_cloud_aiplatform_v1 as vertexai;\nuse serde_json::Value;\nuse urlencoding::encode;\n\nfn get_embedding_dimension(model: &str) -> Option {\n let model = model.to_ascii_lowercase();\n if model.starts_with(\"gemini-embedding-\") {\n Some(3072)\n } else if model.starts_with(\"text-embedding-\") {\n Some(768)\n } else if model.starts_with(\"embedding-\") {\n Some(768)\n } else if model.starts_with(\"text-multilingual-embedding-\") {\n Some(768)\n } else {\n None\n }\n}\n\npub struct AiStudioClient {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl AiStudioClient {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Gemini doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"GEMINI_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"GEMINI_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n// Recursively remove all `additionalProperties` fields from a JSON value\nfn remove_additional_properties(value: &mut Value) {\n match value {\n Value::Object(map) => {\n map.remove(\"additionalProperties\");\n for v in map.values_mut() {\n remove_additional_properties(v);\n }\n }\n Value::Array(arr) => {\n for v in arr {\n remove_additional_properties(v);\n }\n }\n _ => {}\n }\n}\n\nimpl AiStudioClient {\n fn get_api_url(&self, model: &str, api_name: &str) -> String {\n format!(\n \"https://generativelanguage.googleapis.com/v1beta/models/{}:{}?key={}\",\n encode(model),\n api_name,\n encode(&self.api_key)\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for AiStudioClient {\n async fn generate<'req>(\n &self,\n request: 
LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_parts: Vec = Vec::new();\n\n // Add text part first\n user_parts.push(serde_json::json!({ \"text\": request.user_prompt }));\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_parts.push(serde_json::json!({\n \"inlineData\": {\n \"mimeType\": mime_type,\n \"data\": base64_image\n }\n }));\n }\n\n // Compose the contents\n let contents = vec![serde_json::json!({\n \"role\": \"user\",\n \"parts\": user_parts\n })];\n\n // Prepare payload\n let mut payload = serde_json::json!({ \"contents\": contents });\n if let Some(system) = request.system_prompt {\n payload[\"systemInstruction\"] = serde_json::json!({\n \"parts\": [ { \"text\": system } ]\n });\n }\n\n // If structured output is requested, add schema and responseMimeType\n if let Some(OutputFormat::JsonSchema { schema, .. 
}) = &request.output_format {\n let mut schema_json = serde_json::to_value(schema)?;\n remove_additional_properties(&mut schema_json);\n payload[\"generationConfig\"] = serde_json::json!({\n \"responseMimeType\": \"application/json\",\n \"responseSchema\": schema_json\n });\n }\n\n let url = self.get_api_url(request.model, \"generateContent\");\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let resp_json: Value = resp.json().await.context(\"Invalid JSON\")?;\n\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Gemini API error: {:?}\", error);\n }\n let mut resp_json = resp_json;\n let text = match &mut resp_json[\"candidates\"][0][\"content\"][\"parts\"][0][\"text\"] {\n Value::String(s) => std::mem::take(s),\n _ => bail!(\"No text in response\"),\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[derive(Deserialize)]\nstruct ContentEmbedding {\n values: Vec,\n}\n#[derive(Deserialize)]\nstruct EmbedContentResponse {\n embedding: ContentEmbedding,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for AiStudioClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = self.get_api_url(request.model, \"embedContent\");\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"content\": { \"parts\": [{ \"text\": request.text }] },\n });\n if let Some(task_type) = request.task_type {\n payload[\"taskType\"] = serde_json::Value::String(task_type.into());\n }\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n 
&retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: EmbedContentResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n\npub struct VertexAiClient {\n client: vertexai::client::PredictionService,\n config: super::VertexAiConfig,\n}\n\nimpl VertexAiClient {\n pub async fn new(\n address: Option,\n api_config: Option,\n ) -> Result {\n if address.is_some() {\n api_bail!(\"VertexAi API address is not supported for VertexAi API type\");\n }\n let Some(super::LlmApiConfig::VertexAi(config)) = api_config else {\n api_bail!(\"VertexAi API config is required for VertexAi API type\");\n };\n let client = vertexai::client::PredictionService::builder()\n .build()\n .await?;\n Ok(Self { client, config })\n }\n\n fn get_model_path(&self, model: &str) -> String {\n format!(\n \"projects/{}/locations/{}/publishers/google/models/{}\",\n self.config.project,\n self.config.region.as_deref().unwrap_or(\"global\"),\n model\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for VertexAiClient {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n use vertexai::model::{Blob, Content, GenerationConfig, Part, Schema, part::Data};\n\n // Compose parts\n let mut parts = Vec::new();\n // Add text part\n parts.push(Part::new().set_text(request.user_prompt.to_string()));\n // Add image part if present\n if let Some(image_bytes) = request.image {\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n parts.push(\n Part::new().set_inline_data(\n Blob::new()\n .set_data(image_bytes.into_owned())\n .set_mime_type(mime_type.to_string()),\n ),\n );\n }\n // Compose content\n let mut contents = 
Vec::new();\n contents.push(Content::new().set_role(\"user\".to_string()).set_parts(parts));\n // Compose system instruction if present\n let system_instruction = request.system_prompt.as_ref().map(|sys| {\n Content::new()\n .set_role(\"system\".to_string())\n .set_parts(vec![Part::new().set_text(sys.to_string())])\n });\n\n // Compose generation config\n let mut generation_config = None;\n if let Some(OutputFormat::JsonSchema { schema, .. }) = &request.output_format {\n let schema_json = serde_json::to_value(schema)?;\n generation_config = Some(\n GenerationConfig::new()\n .set_response_mime_type(\"application/json\".to_string())\n .set_response_schema(serde_json::from_value::(schema_json)?),\n );\n }\n\n let mut req = self\n .client\n .generate_content()\n .set_model(self.get_model_path(request.model))\n .set_contents(contents);\n if let Some(sys) = system_instruction {\n req = req.set_system_instruction(sys);\n }\n if let Some(config) = generation_config {\n req = req.set_generation_config(config);\n }\n\n // Call the API\n let resp = req.send().await?;\n // Extract text from response\n let Some(Data::Text(text)) = resp\n .candidates\n .into_iter()\n .next()\n .and_then(|c| c.content)\n .and_then(|content| content.parts.into_iter().next())\n .and_then(|part| part.data)\n else {\n bail!(\"No text in response\");\n };\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for VertexAiClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n // Create the instances for the request\n let mut instance = serde_json::json!({\n \"content\": request.text\n });\n // Add task type if specified\n if let Some(task_type) = &request.task_type {\n instance[\"task_type\"] = 
serde_json::Value::String(task_type.to_string());\n }\n\n let instances = vec![instance];\n\n // Prepare the request parameters\n let mut parameters = serde_json::json!({});\n if let Some(output_dimension) = request.output_dimension {\n parameters[\"outputDimensionality\"] = serde_json::Value::Number(output_dimension.into());\n }\n\n // Build the prediction request using the raw predict builder\n let response = self\n .client\n .predict()\n .set_endpoint(self.get_model_path(request.model))\n .set_instances(instances)\n .set_parameters(parameters)\n .send()\n .await?;\n\n // Extract the embedding from the response\n let embeddings = response\n .predictions\n .into_iter()\n .next()\n .and_then(|mut e| e.get_mut(\"embeddings\").map(|v| v.take()))\n .ok_or_else(|| anyhow::anyhow!(\"No embeddings in response\"))?;\n let embedding: ContentEmbedding = serde_json::from_value(embeddings)?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/functions/embed_text.rs", "use crate::{\n llm::{\n LlmApiConfig, LlmApiType, LlmEmbeddingClient, LlmEmbeddingRequest, new_llm_embedding_client,\n },\n ops::sdk::*,\n};\n\n#[derive(Deserialize)]\nstruct Spec {\n api_type: LlmApiType,\n model: String,\n address: Option,\n api_config: Option,\n output_dimension: Option,\n task_type: Option,\n}\n\nstruct Args {\n client: Box,\n text: ResolvedOpArg,\n}\n\nstruct Executor {\n spec: Spec,\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let req = LlmEmbeddingRequest {\n model: &self.spec.model,\n text: Cow::Borrowed(text),\n output_dimension: self.spec.output_dimension,\n task_type: self\n .spec\n 
.task_type\n .as_ref()\n .map(|s| Cow::Borrowed(s.as_str())),\n };\n let embedding = self.args.client.embed_text(req).await?;\n Ok(embedding.embedding.into())\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"EmbedText\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)> {\n let text = args_resolver.next_arg(\"text\")?;\n let client =\n new_llm_embedding_client(spec.api_type, spec.address.clone(), spec.api_config.clone())\n .await?;\n let output_dimension = match spec.output_dimension {\n Some(output_dimension) => output_dimension,\n None => {\n client.get_default_embedding_dimension(spec.model.as_str())\n .ok_or_else(|| api_error!(\"model \\\"{}\\\" is unknown for {:?}, needs to specify `output_dimension` explicitly\", spec.model, spec.api_type))?\n }\n };\n let output_schema = make_output_type(BasicValueType::Vector(VectorTypeSchema {\n dimension: Some(output_dimension as usize),\n element_type: Box::new(BasicValueType::Float32),\n }));\n Ok((Args { client, text }, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { spec, args }))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_embed_text() {\n let spec = Spec {\n api_type: LlmApiType::OpenAi,\n model: \"text-embedding-ada-002\".to_string(),\n address: None,\n api_config: None,\n output_dimension: None,\n task_type: None,\n 
};\n\n let factory = Arc::new(Factory);\n let text_content = \"CocoIndex is a performant data transformation framework for AI.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_embed_text: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Vector(arc_vec)) => {\n assert_eq!(arc_vec.len(), 1536, \"Embedding vector dimension mismatch\");\n for item in arc_vec.iter() {\n match item {\n BasicValue::Float32(_) => {}\n _ => panic!(\"Embedding vector element is not Float32: {item:?}\"),\n }\n }\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Vector), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/llm/anthropic.rs", "use crate::prelude::*;\nuse base64::prelude::*;\n\nuse crate::llm::{\n LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse anyhow::Context;\nuse urlencoding::encode;\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Anthropic doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"ANTHROPIC_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"ANTHROPIC_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: 
LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_content_parts: Vec = Vec::new();\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_content_parts.push(serde_json::json!({\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": mime_type,\n \"data\": base64_image,\n }\n }));\n }\n\n // Add text part\n user_content_parts.push(serde_json::json!({\n \"type\": \"text\",\n \"text\": request.user_prompt\n }));\n\n let messages = vec![serde_json::json!({\n \"role\": \"user\",\n \"content\": user_content_parts\n })];\n\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"messages\": messages,\n \"max_tokens\": 4096\n });\n\n // Add system prompt as top-level field if present (required)\n if let Some(system) = request.system_prompt {\n payload[\"system\"] = serde_json::json!(system);\n }\n\n // Extract schema from output_format, error if not JsonSchema\n let schema = match request.output_format.as_ref() {\n Some(OutputFormat::JsonSchema { schema, .. 
}) => schema,\n _ => api_bail!(\"Anthropic client expects OutputFormat::JsonSchema for all requests\"),\n };\n\n let schema_json = serde_json::to_value(schema)?;\n payload[\"tools\"] = serde_json::json!([\n { \"type\": \"custom\", \"name\": \"report_result\", \"input_schema\": schema_json }\n ]);\n\n let url = \"https://api.anthropic.com/v1/messages\";\n\n let encoded_api_key = encode(&self.api_key);\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"x-api-key\", encoded_api_key.as_ref())\n .header(\"anthropic-version\", \"2023-06-01\")\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Anthropic API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let mut resp_json: serde_json::Value = resp.json().await.context(\"Invalid JSON\")?;\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Anthropic API error: {:?}\", error);\n }\n\n // Debug print full response\n // println!(\"Anthropic API full response: {resp_json:?}\");\n\n let resp_content = &resp_json[\"content\"];\n let tool_name = \"report_result\";\n let mut extracted_json: Option = None;\n if let Some(array) = resp_content.as_array() {\n for item in array {\n if item.get(\"type\") == Some(&serde_json::Value::String(\"tool_use\".to_string()))\n && item.get(\"name\") == Some(&serde_json::Value::String(tool_name.to_string()))\n {\n if let Some(input) = item.get(\"input\") {\n extracted_json = Some(input.clone());\n break;\n }\n }\n }\n }\n let text = if let Some(json) = extracted_json {\n // Try strict JSON serialization first\n serde_json::to_string(&json)?\n } else {\n // Fallback: try text if no tool output found\n match &mut resp_json[\"content\"][0][\"text\"] {\n serde_json::Value::String(s) => {\n // Try strict JSON parsing first\n match serde_json::from_str::(s) {\n Ok(_) => std::mem::take(s),\n Err(e) => {\n // Try permissive json5 parsing as fallback\n match 
json5::from_str::(s) {\n Ok(value) => {\n println!(\"[Anthropic] Used permissive JSON5 parser for output\");\n serde_json::to_string(&value)?\n }\n Err(e2) => {\n return Err(anyhow::anyhow!(format!(\n \"No structured tool output or text found in response, and permissive JSON5 parsing also failed: {e}; {e2}\"\n )));\n }\n }\n }\n }\n }\n _ => {\n return Err(anyhow::anyhow!(\n \"No structured tool output or text found in response\"\n ));\n }\n }\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n"], ["/cocoindex/src/execution/stats.rs", "use crate::prelude::*;\n\nuse std::{\n ops::AddAssign,\n sync::atomic::{AtomicI64, Ordering::Relaxed},\n};\n\n#[derive(Default, Serialize)]\npub struct Counter(pub AtomicI64);\n\nimpl Counter {\n pub fn inc(&self, by: i64) {\n self.0.fetch_add(by, Relaxed);\n }\n\n pub fn get(&self) -> i64 {\n self.0.load(Relaxed)\n }\n\n pub fn delta(&self, base: &Self) -> Counter {\n Counter(AtomicI64::new(self.get() - base.get()))\n }\n\n pub fn into_inner(self) -> i64 {\n self.0.into_inner()\n }\n\n pub fn merge(&self, delta: &Self) {\n self.0.fetch_add(delta.get(), Relaxed);\n }\n}\n\nimpl AddAssign for Counter {\n fn add_assign(&mut self, rhs: Self) {\n self.0.fetch_add(rhs.into_inner(), Relaxed);\n }\n}\n\nimpl Clone for Counter {\n fn clone(&self) -> Self {\n Self(AtomicI64::new(self.get()))\n }\n}\n\nimpl std::fmt::Display for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\nimpl std::fmt::Debug for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\n#[derive(Debug, Serialize, Default, Clone)]\npub struct UpdateStats {\n pub num_no_change: Counter,\n pub num_insertions: Counter,\n pub 
num_deletions: Counter,\n /// Number of source rows that were updated.\n pub num_updates: Counter,\n /// Number of source rows that were reprocessed because of logic change.\n pub num_reprocesses: Counter,\n pub num_errors: Counter,\n}\n\nimpl UpdateStats {\n pub fn delta(&self, base: &Self) -> Self {\n UpdateStats {\n num_no_change: self.num_no_change.delta(&base.num_no_change),\n num_insertions: self.num_insertions.delta(&base.num_insertions),\n num_deletions: self.num_deletions.delta(&base.num_deletions),\n num_updates: self.num_updates.delta(&base.num_updates),\n num_reprocesses: self.num_reprocesses.delta(&base.num_reprocesses),\n num_errors: self.num_errors.delta(&base.num_errors),\n }\n }\n\n pub fn merge(&self, delta: &Self) {\n self.num_no_change.merge(&delta.num_no_change);\n self.num_insertions.merge(&delta.num_insertions);\n self.num_deletions.merge(&delta.num_deletions);\n self.num_updates.merge(&delta.num_updates);\n self.num_reprocesses.merge(&delta.num_reprocesses);\n self.num_errors.merge(&delta.num_errors);\n }\n\n pub fn has_any_change(&self) -> bool {\n self.num_insertions.get() > 0\n || self.num_deletions.get() > 0\n || self.num_updates.get() > 0\n || self.num_reprocesses.get() > 0\n || self.num_errors.get() > 0\n }\n}\n\nimpl std::fmt::Display for UpdateStats {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let mut messages = Vec::new();\n let num_errors = self.num_errors.get();\n if num_errors > 0 {\n messages.push(format!(\"{num_errors} source rows FAILED\"));\n }\n\n let num_skipped = self.num_no_change.get();\n if num_skipped > 0 {\n messages.push(format!(\"{num_skipped} source rows NO CHANGE\"));\n }\n\n let num_insertions = self.num_insertions.get();\n let num_deletions = self.num_deletions.get();\n let num_updates = self.num_updates.get();\n let num_reprocesses = self.num_reprocesses.get();\n let num_source_rows = num_insertions + num_deletions + num_updates + num_reprocesses;\n if num_source_rows > 0 {\n 
messages.push(format!(\n \"{num_source_rows} source rows processed ({num_insertions} ADDED, {num_deletions} REMOVED, {num_updates} UPDATED, {num_reprocesses} REPROCESSED on flow change)\",\n ));\n }\n\n if !messages.is_empty() {\n write!(f, \"{}\", messages.join(\"; \"))?;\n } else {\n write!(f, \"No changes\")?;\n }\n\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceUpdateInfo {\n pub source_name: String,\n pub stats: UpdateStats,\n}\n\nimpl std::fmt::Display for SourceUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.source_name, self.stats)\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct IndexUpdateInfo {\n pub sources: Vec,\n}\n\nimpl std::fmt::Display for IndexUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n for source in self.sources.iter() {\n writeln!(f, \"{source}\")?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/sources/local_file.rs", "use async_stream::try_stream;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse log::warn;\nuse std::borrow::Cow;\nuse std::path::Path;\nuse std::{path::PathBuf, sync::Arc};\n\nuse crate::base::field_attrs;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n path: String,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n}\n\nstruct Executor {\n root_path: PathBuf,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, path: impl AsRef + Copy) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(path))\n }\n\n fn is_file_included(&self, path: impl AsRef + Copy) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(path))\n && !self.is_excluded(path)\n }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, 
Result>> {\n let root_component_size = self.root_path.components().count();\n let mut dirs = Vec::new();\n dirs.push(Cow::Borrowed(&self.root_path));\n let mut new_dirs = Vec::new();\n try_stream! {\n while let Some(dir) = dirs.pop() {\n let mut entries = tokio::fs::read_dir(dir.as_ref()).await?;\n while let Some(entry) = entries.next_entry().await? {\n let path = entry.path();\n let mut path_components = path.components();\n for _ in 0..root_component_size {\n path_components.next();\n }\n let relative_path = path_components.as_path();\n if path.is_dir() {\n if !self.is_excluded(relative_path) {\n new_dirs.push(Cow::Owned(path));\n }\n } else if self.is_file_included(relative_path) {\n let ordinal: Option = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n if let Some(relative_path) = relative_path.to_str() {\n yield vec![PartialSourceRowMetadata {\n key: KeyValue::Str(relative_path.into()),\n ordinal,\n }];\n } else {\n warn!(\"Skipped ill-formed file path: {}\", path.display());\n }\n }\n }\n dirs.extend(new_dirs.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n if !self.is_file_included(key.str_value()?.as_ref()) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let path = self.root_path.join(key.str_value()?.as_ref());\n let ordinal = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n let value = if options.include_value {\n match std::fs::read(path) {\n Ok(content) => {\n let content = if self.binary {\n fields_value!(content)\n } else {\n fields_value!(String::from_utf8_lossy(&content).to_string())\n };\n Some(SourceValue::Existence(content))\n }\n Err(e) if e.kind() == std::io::ErrorKind::NotFound => {\n Some(SourceValue::NonExistence)\n }\n Err(e) => Err(e)?,\n }\n } else {\n 
None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"LocalFile\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor {\n root_path: PathBuf::from(spec.path),\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/base/duration.rs", "use std::f64;\n\nuse anyhow::{Result, anyhow, bail};\nuse chrono::Duration;\n\n/// Parses a string of number-unit pairs into a vector of (number, unit),\n/// ensuring units are among the allowed ones.\nfn parse_components(\n s: &str,\n allowed_units: &[char],\n original_input: &str,\n) -> Result> {\n let mut result = Vec::new();\n let mut iter = s.chars().peekable();\n while iter.peek().is_some() {\n let mut num_str = String::new();\n let mut has_decimal = false;\n\n // Parse 
digits and optional decimal point\n while let Some(&c) = iter.peek() {\n if c.is_ascii_digit() || (c == '.' && !has_decimal) {\n if c == '.' {\n has_decimal = true;\n }\n num_str.push(iter.next().unwrap());\n } else {\n break;\n }\n }\n if num_str.is_empty() {\n bail!(\"Expected number in: {}\", original_input);\n }\n let num = num_str\n .parse::()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", num_str, original_input))?;\n if let Some(&unit) = iter.peek() {\n if allowed_units.contains(&unit) {\n result.push((num, unit));\n iter.next();\n } else {\n bail!(\"Invalid unit '{}' in: {}\", unit, original_input);\n }\n } else {\n bail!(\n \"Missing unit after number '{}' in: {}\",\n num_str,\n original_input\n );\n }\n }\n Ok(result)\n}\n\n/// Parses an ISO 8601 duration string into a `chrono::Duration`.\nfn parse_iso8601_duration(s: &str, original_input: &str) -> Result {\n let (is_negative, s_after_sign) = if let Some(stripped) = s.strip_prefix('-') {\n (true, stripped)\n } else {\n (false, s)\n };\n\n if !s_after_sign.starts_with('P') {\n bail!(\"Duration must start with 'P' in: {}\", original_input);\n }\n let s_after_p = &s_after_sign[1..];\n\n let (date_part, time_part) = if let Some(pos) = s_after_p.find('T') {\n (&s_after_p[..pos], Some(&s_after_p[pos + 1..]))\n } else {\n (s_after_p, None)\n };\n\n // Date components (Y, M, W, D)\n let date_components = parse_components(date_part, &['Y', 'M', 'W', 'D'], original_input)?;\n\n // Time components (H, M, S)\n let time_components = if let Some(time_str) = time_part {\n let comps = parse_components(time_str, &['H', 'M', 'S'], original_input)?;\n if comps.is_empty() {\n bail!(\n \"Time part present but no time components in: {}\",\n original_input\n );\n }\n comps\n } else {\n vec![]\n };\n\n if date_components.is_empty() && time_components.is_empty() {\n bail!(\"No components in duration: {}\", original_input);\n }\n\n // Accumulate date duration\n let date_duration = date_components\n .iter()\n 
.fold(Duration::zero(), |acc, &(num, unit)| {\n let days = match unit {\n 'Y' => num * 365.0,\n 'M' => num * 30.0,\n 'W' => num * 7.0,\n 'D' => num,\n _ => unreachable!(\"Invalid date unit should be caught by prior validation\"),\n };\n let microseconds = (days * 86_400_000_000.0) as i64;\n acc + Duration::microseconds(microseconds)\n });\n\n // Accumulate time duration\n let time_duration =\n time_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| match unit {\n 'H' => {\n let nanoseconds = (num * 3_600_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'M' => {\n let nanoseconds = (num * 60_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'S' => {\n let nanoseconds = (num.fract() * 1_000_000_000.0).round() as i64;\n acc + Duration::seconds(num as i64) + Duration::nanoseconds(nanoseconds)\n }\n _ => unreachable!(\"Invalid time unit should be caught by prior validation\"),\n });\n\n let mut total = date_duration + time_duration;\n if is_negative {\n total = -total;\n }\n\n Ok(total)\n}\n\n/// Parses a human-readable duration string into a `chrono::Duration`.\nfn parse_human_readable_duration(s: &str, original_input: &str) -> Result {\n let parts: Vec<&str> = s.split_whitespace().collect();\n if parts.is_empty() || parts.len() % 2 != 0 {\n bail!(\n \"Invalid human-readable duration format in: {}\",\n original_input\n );\n }\n\n let durations: Result> = parts\n .chunks(2)\n .map(|chunk| {\n let num: i64 = chunk[0]\n .parse()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", chunk[0], original_input))?;\n\n match chunk[1].to_lowercase().as_str() {\n \"day\" | \"days\" => Ok(Duration::days(num)),\n \"hour\" | \"hours\" => Ok(Duration::hours(num)),\n \"minute\" | \"minutes\" => Ok(Duration::minutes(num)),\n \"second\" | \"seconds\" => Ok(Duration::seconds(num)),\n \"millisecond\" | \"milliseconds\" => Ok(Duration::milliseconds(num)),\n \"microsecond\" | \"microseconds\" => 
Ok(Duration::microseconds(num)),\n _ => bail!(\"Invalid unit '{}' in: {}\", chunk[1], original_input),\n }\n })\n .collect();\n\n durations.map(|durs| durs.into_iter().sum())\n}\n\n/// Parses a duration string into a `chrono::Duration`, trying ISO 8601 first, then human-readable format.\npub fn parse_duration(s: &str) -> Result {\n let original_input = s;\n let s = s.trim();\n if s.is_empty() {\n bail!(\"Empty duration string\");\n }\n\n let is_likely_iso8601 = match s.as_bytes() {\n [c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n [b'-', c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n _ => false,\n };\n\n if is_likely_iso8601 {\n parse_iso8601_duration(s, original_input)\n } else {\n parse_human_readable_duration(s, original_input)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn check_ok(res: Result, expected: Duration, input_str: &str) {\n match res {\n Ok(duration) => assert_eq!(duration, expected, \"Input: '{input_str}'\"),\n Err(e) => panic!(\"Input: '{input_str}', expected Ok({expected:?}), but got Err: {e}\"),\n }\n }\n\n fn check_err_contains(res: Result, expected_substring: &str, input_str: &str) {\n match res {\n Ok(d) => panic!(\n \"Input: '{input_str}', expected error containing '{expected_substring}', but got Ok({d:?})\"\n ),\n Err(e) => {\n let err_msg = e.to_string();\n assert!(\n err_msg.contains(expected_substring),\n \"Input: '{input_str}', error message '{err_msg}' does not contain expected substring '{expected_substring}'\"\n );\n }\n }\n }\n\n #[test]\n fn test_empty_string() {\n check_err_contains(parse_duration(\"\"), \"Empty duration string\", \"\\\"\\\"\");\n }\n\n #[test]\n fn test_whitespace_string() {\n check_err_contains(parse_duration(\" \"), \"Empty duration string\", \"\\\" \\\"\");\n }\n\n #[test]\n fn test_iso_just_p() {\n check_err_contains(parse_duration(\"P\"), \"No components in duration: P\", \"\\\"P\\\"\");\n }\n\n #[test]\n fn test_iso_pt() {\n check_err_contains(\n parse_duration(\"PT\"),\n \"Time part 
present but no time components in: PT\",\n \"\\\"PT\\\"\",\n );\n }\n\n #[test]\n fn test_iso_missing_number_before_unit_in_date_part() {\n check_err_contains(parse_duration(\"PD\"), \"Expected number in: PD\", \"\\\"PD\\\"\");\n }\n #[test]\n fn test_iso_missing_number_before_unit_in_time_part() {\n check_err_contains(parse_duration(\"PTM\"), \"Expected number in: PTM\", \"\\\"PTM\\\"\");\n }\n\n #[test]\n fn test_iso_time_unit_without_t() {\n check_err_contains(parse_duration(\"P1H\"), \"Invalid unit 'H' in: P1H\", \"\\\"P1H\\\"\");\n check_err_contains(parse_duration(\"P1S\"), \"Invalid unit 'S' in: P1S\", \"\\\"P1S\\\"\");\n }\n\n #[test]\n fn test_iso_invalid_unit() {\n check_err_contains(parse_duration(\"P1X\"), \"Invalid unit 'X' in: P1X\", \"\\\"P1X\\\"\");\n check_err_contains(\n parse_duration(\"PT1X\"),\n \"Invalid unit 'X' in: PT1X\",\n \"\\\"PT1X\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_lowercase_unit_is_not_allowed() {\n check_err_contains(\n parse_duration(\"p1h\"),\n \"Duration must start with 'P' in: p1h\",\n \"\\\"p1h\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1h\"),\n \"Invalid unit 'h' in: PT1h\",\n \"\\\"PT1h\\\"\",\n );\n }\n\n #[test]\n fn test_iso_trailing_number_error() {\n check_err_contains(\n parse_duration(\"P1D2\"),\n \"Missing unit after number '2' in: P1D2\",\n \"\\\"P1D2\\\"\",\n );\n }\n\n #[test]\n fn test_iso_invalid_fractional_format() {\n check_err_contains(\n parse_duration(\"PT1..5S\"),\n \"Invalid unit '.' in: PT1..5S\",\n \"\\\"PT1..5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1.5.5S\"),\n \"Invalid unit '.' in: PT1.5.5S\",\n \"\\\"PT1.5.5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1..5D\"),\n \"Invalid unit '.' 
in: P1..5D\",\n \"\\\"P1..5D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_misplaced_t() {\n check_err_contains(\n parse_duration(\"P1DT2H T3M\"),\n \"Expected number in: P1DT2H T3M\",\n \"\\\"P1DT2H T3M\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1T2H\"),\n \"Missing unit after number '1' in: P1T2H\",\n \"\\\"P1T2H\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_number_after_p() {\n check_err_contains(\n parse_duration(\"P-1D\"),\n \"Expected number in: P-1D\",\n \"\\\"P-1D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_months() {\n check_ok(parse_duration(\"P1M\"), Duration::days(30), \"\\\"P1M\\\"\");\n check_ok(parse_duration(\" P13M\"), Duration::days(13 * 30), \"\\\"P13M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_weeks() {\n check_ok(parse_duration(\"P1W\"), Duration::days(7), \"\\\"P1W\\\"\");\n check_ok(parse_duration(\" P1W \"), Duration::days(7), \"\\\"P1W\\\"\");\n }\n\n #[test]\n fn test_iso_valid_days() {\n check_ok(parse_duration(\"P1D\"), Duration::days(1), \"\\\"P1D\\\"\");\n }\n\n #[test]\n fn test_iso_valid_hours() {\n check_ok(parse_duration(\"PT2H\"), Duration::hours(2), \"\\\"PT2H\\\"\");\n }\n\n #[test]\n fn test_iso_valid_minutes() {\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT3M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_seconds() {\n check_ok(parse_duration(\"PT4S\"), Duration::seconds(4), \"\\\"PT4S\\\"\");\n }\n\n #[test]\n fn test_iso_combined_units() {\n check_ok(\n parse_duration(\"P1Y2M3W4DT5H6M7S\"),\n Duration::days(365 + 60 + 3 * 7 + 4)\n + Duration::hours(5)\n + Duration::minutes(6)\n + Duration::seconds(7),\n \"\\\"P1Y2M3DT4H5M6S\\\"\",\n );\n check_ok(\n parse_duration(\"P1DT2H3M4S\"),\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4),\n \"\\\"P1DT2H3M4S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_duplicated_unit() {\n check_ok(parse_duration(\"P1D1D\"), Duration::days(2), \"\\\"P1D1D\\\"\");\n check_ok(parse_duration(\"PT1H1H\"), Duration::hours(2), 
\"\\\"PT1H1H\\\"\");\n }\n\n #[test]\n fn test_iso_out_of_order_unit() {\n check_ok(\n parse_duration(\"P1W1Y\"),\n Duration::days(365 + 7),\n \"\\\"P1W1Y\\\"\",\n );\n check_ok(\n parse_duration(\"PT2S1H\"),\n Duration::hours(1) + Duration::seconds(2),\n \"\\\"PT2S1H\\\"\",\n );\n check_ok(parse_duration(\"P3M\"), Duration::days(90), \"\\\"PT2S1H\\\"\");\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT2S1H\\\"\");\n check_err_contains(\n parse_duration(\"P1H2D\"),\n \"Invalid unit 'H' in: P1H2D\", // Time part without 'T' is invalid\n \"\\\"P1H2D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_duration_p1d() {\n check_ok(parse_duration(\"-P1D\"), -Duration::days(1), \"\\\"-P1D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pd0() {\n check_ok(parse_duration(\"P0D\"), Duration::zero(), \"\\\"P0D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0s() {\n check_ok(parse_duration(\"PT0S\"), Duration::zero(), \"\\\"PT0S\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0h0m0s() {\n check_ok(parse_duration(\"PT0H0M0S\"), Duration::zero(), \"\\\"PT0H0M0S\\\"\");\n }\n\n #[test]\n fn test_iso_fractional_seconds() {\n check_ok(\n parse_duration(\"PT1.5S\"),\n Duration::seconds(1) + Duration::milliseconds(500),\n \"\\\"PT1.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT441010.456123S\"),\n Duration::seconds(441010) + Duration::microseconds(456123),\n \"\\\"PT441010.456123S\\\"\",\n );\n check_ok(\n parse_duration(\"PT0.000001S\"),\n Duration::microseconds(1),\n \"\\\"PT0.000001S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_fractional_date_units() {\n check_ok(\n parse_duration(\"P1.5D\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"P1.25Y\"),\n Duration::microseconds((1.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"P1.25Y\\\"\",\n );\n check_ok(\n parse_duration(\"P2.75M\"),\n Duration::microseconds((2.75 * 30.0 * 86_400_000_000.0) as i64),\n \"\\\"P2.75M\\\"\",\n );\n 
check_ok(\n parse_duration(\"P0.5W\"),\n Duration::microseconds((0.5 * 7.0 * 86_400_000_000.0) as i64),\n \"\\\"P0.5W\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_fractional_date_units() {\n check_ok(\n parse_duration(\"-P1.5D\"),\n -Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"-P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"-P0.25Y\"),\n -Duration::microseconds((0.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"-P0.25Y\\\"\",\n );\n }\n\n #[test]\n fn test_iso_combined_fractional_units() {\n check_ok(\n parse_duration(\"P1.5DT2.5H3.5M4.5S\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64)\n + Duration::microseconds((2.5 * 3_600_000_000.0) as i64)\n + Duration::microseconds((3.5 * 60_000_000.0) as i64)\n + Duration::seconds(4)\n + Duration::milliseconds(500),\n \"\\\"1.5DT2.5H3.5M4.5S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_multiple_fractional_time_units() {\n check_ok(\n parse_duration(\"PT1.5S2.5S\"),\n Duration::seconds(1 + 2) + Duration::milliseconds(500) + Duration::milliseconds(500),\n \"\\\"PT1.5S2.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT1.1H2.2M3.3S\"),\n Duration::hours(1)\n + Duration::seconds((0.1 * 3600.0) as i64)\n + Duration::minutes(2)\n + Duration::seconds((0.2 * 60.0) as i64)\n + Duration::seconds(3)\n + Duration::milliseconds(300),\n \"\\\"PT1.1H2.2M3.3S\\\"\",\n );\n }\n\n // Human-readable Tests\n #[test]\n fn test_human_missing_unit() {\n check_err_contains(\n parse_duration(\"1\"),\n \"Invalid human-readable duration format in: 1\",\n \"\\\"1\\\"\",\n );\n }\n\n #[test]\n fn test_human_missing_number() {\n check_err_contains(\n parse_duration(\"day\"),\n \"Invalid human-readable duration format in: day\",\n \"\\\"day\\\"\",\n );\n }\n\n #[test]\n fn test_human_incomplete_pair() {\n check_err_contains(\n parse_duration(\"1 day 2\"),\n \"Invalid human-readable duration format in: 1 day 2\",\n \"\\\"1 day 2\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_number_at_start() {\n 
check_err_contains(\n parse_duration(\"one day\"),\n \"Invalid number 'one' in: one day\",\n \"\\\"one day\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_unit() {\n check_err_contains(\n parse_duration(\"1 hour 2 minutes 3 seconds four seconds\"),\n \"Invalid number 'four' in: 1 hour 2 minutes 3 seconds four seconds\",\n \"\\\"1 hour 2 minutes 3 seconds four seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_float_number_fail() {\n check_err_contains(\n parse_duration(\"1.5 hours\"),\n \"Invalid number '1.5' in: 1.5 hours\",\n \"\\\"1.5 hours\\\"\",\n );\n }\n\n #[test]\n fn test_invalid_human_readable_no_pairs() {\n check_err_contains(\n parse_duration(\"just some words\"),\n \"Invalid human-readable duration format in: just some words\",\n \"\\\"just some words\\\"\",\n );\n }\n\n #[test]\n fn test_human_unknown_unit() {\n check_err_contains(\n parse_duration(\"1 year\"),\n \"Invalid unit 'year' in: 1 year\",\n \"\\\"1 year\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_day() {\n check_ok(parse_duration(\"1 day\"), Duration::days(1), \"\\\"1 day\\\"\");\n }\n\n #[test]\n fn test_human_valid_days_uppercase() {\n check_ok(parse_duration(\"2 DAYS\"), Duration::days(2), \"\\\"2 DAYS\\\"\");\n }\n\n #[test]\n fn test_human_valid_hour() {\n check_ok(parse_duration(\"3 hour\"), Duration::hours(3), \"\\\"3 hour\\\"\");\n }\n\n #[test]\n fn test_human_valid_hours_mixedcase() {\n check_ok(parse_duration(\"4 HoUrS\"), Duration::hours(4), \"\\\"4 HoUrS\\\"\");\n }\n\n #[test]\n fn test_human_valid_minute() {\n check_ok(\n parse_duration(\"5 minute\"),\n Duration::minutes(5),\n \"\\\"5 minute\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_minutes() {\n check_ok(\n parse_duration(\"6 minutes\"),\n Duration::minutes(6),\n \"\\\"6 minutes\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_second() {\n check_ok(\n parse_duration(\"7 second\"),\n Duration::seconds(7),\n \"\\\"7 second\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_seconds() {\n check_ok(\n 
parse_duration(\"8 seconds\"),\n Duration::seconds(8),\n \"\\\"8 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_millisecond() {\n check_ok(\n parse_duration(\"9 millisecond\"),\n Duration::milliseconds(9),\n \"\\\"9 millisecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_milliseconds() {\n check_ok(\n parse_duration(\"10 milliseconds\"),\n Duration::milliseconds(10),\n \"\\\"10 milliseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microsecond() {\n check_ok(\n parse_duration(\"11 microsecond\"),\n Duration::microseconds(11),\n \"\\\"11 microsecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microseconds() {\n check_ok(\n parse_duration(\"12 microseconds\"),\n Duration::microseconds(12),\n \"\\\"12 microseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_combined() {\n let expected =\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4);\n check_ok(\n parse_duration(\"1 day 2 hours 3 minutes 4 seconds\"),\n expected,\n \"\\\"1 day 2 hours 3 minutes 4 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_out_of_order() {\n check_ok(\n parse_duration(\"1 second 2 hours\"),\n Duration::hours(2) + Duration::seconds(1),\n \"\\\"1 second 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"7 minutes 6 hours 5 days\"),\n Duration::days(5) + Duration::hours(6) + Duration::minutes(7),\n \"\\\"7 minutes 6 hours 5 days\\\"\",\n )\n }\n\n #[test]\n fn test_human_zero_duration_seconds() {\n check_ok(\n parse_duration(\"0 seconds\"),\n Duration::zero(),\n \"\\\"0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_days_hours() {\n check_ok(\n parse_duration(\"0 day 0 hour\"),\n Duration::zero(),\n \"\\\"0 day 0 hour\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_multiple_zeros() {\n check_ok(\n parse_duration(\"0 days 0 hours 0 minutes 0 seconds\"),\n Duration::zero(),\n \"\\\"0 days 0 hours 0 minutes 0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_no_space_between_num_unit() {\n 
check_err_contains(\n parse_duration(\"1day\"),\n \"Invalid human-readable duration format in: 1day\",\n \"\\\"1day\\\"\",\n );\n }\n\n #[test]\n fn test_human_trimmed() {\n check_ok(parse_duration(\" 1 day \"), Duration::days(1), \"\\\" 1 day \\\"\");\n }\n\n #[test]\n fn test_human_extra_whitespace() {\n check_ok(\n parse_duration(\" 1 day 2 hours \"),\n Duration::days(1) + Duration::hours(2),\n \"\\\" 1 day 2 hours \\\"\",\n );\n }\n\n #[test]\n fn test_human_negative_numbers() {\n check_ok(\n parse_duration(\"-1 day 2 hours\"),\n Duration::days(-1) + Duration::hours(2),\n \"\\\"-1 day 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"1 day -2 hours\"),\n Duration::days(1) + Duration::hours(-2),\n \"\\\"1 day -2 hours\\\"\",\n );\n }\n}\n"], ["/cocoindex/src/utils/yaml_ser.rs", "use base64::prelude::*;\nuse serde::ser::{self, Serialize};\nuse yaml_rust2::yaml::Yaml;\n\n#[derive(Debug)]\npub struct YamlSerializerError {\n msg: String,\n}\n\nimpl std::fmt::Display for YamlSerializerError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"YamlSerializerError: {}\", self.msg)\n }\n}\n\nimpl std::error::Error for YamlSerializerError {}\n\nimpl ser::Error for YamlSerializerError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n YamlSerializerError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\npub struct YamlSerializer;\n\nimpl YamlSerializer {\n pub fn serialize(value: &T) -> Result\n where\n T: Serialize,\n {\n value.serialize(YamlSerializer)\n }\n}\n\nimpl ser::Serializer for YamlSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n type SerializeSeq = SeqSerializer;\n type SerializeTuple = SeqSerializer;\n type SerializeTupleStruct = SeqSerializer;\n type SerializeTupleVariant = VariantSeqSerializer;\n type SerializeMap = MapSerializer;\n type SerializeStruct = MapSerializer;\n type SerializeStructVariant = VariantMapSerializer;\n\n fn serialize_bool(self, v: bool) -> Result {\n 
Ok(Yaml::Boolean(v))\n }\n\n fn serialize_i8(self, v: i8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i16(self, v: i16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i32(self, v: i32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i64(self, v: i64) -> Result {\n Ok(Yaml::Integer(v))\n }\n\n fn serialize_u8(self, v: u8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u16(self, v: u16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u32(self, v: u32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u64(self, v: u64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f32(self, v: f32) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f64(self, v: f64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_char(self, v: char) -> Result {\n Ok(Yaml::String(v.to_string()))\n }\n\n fn serialize_str(self, v: &str) -> Result {\n Ok(Yaml::String(v.to_owned()))\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result {\n let encoded = BASE64_STANDARD.encode(v);\n Ok(Yaml::String(encoded))\n }\n\n fn serialize_none(self) -> Result {\n Ok(Yaml::Null)\n }\n\n fn serialize_some(self, value: &T) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_struct(self, _name: &'static str) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result {\n Ok(Yaml::String(variant.to_owned()))\n }\n\n fn serialize_newtype_struct(\n self,\n _name: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n 
let mut hash = yaml_rust2::yaml::Hash::new();\n hash.insert(Yaml::String(variant.to_owned()), value.serialize(self)?);\n Ok(Yaml::Hash(hash))\n }\n\n fn serialize_seq(self, len: Option) -> Result {\n Ok(SeqSerializer {\n vec: Vec::with_capacity(len.unwrap_or(0)),\n })\n }\n\n fn serialize_tuple(self, len: usize) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n len: usize,\n ) -> Result {\n Ok(VariantSeqSerializer {\n variant_name: variant.to_owned(),\n vec: Vec::with_capacity(len),\n })\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n Ok(MapSerializer {\n map: yaml_rust2::yaml::Hash::new(),\n next_key: None,\n })\n }\n\n fn serialize_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_map(Some(len))\n }\n\n fn serialize_struct_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n Ok(VariantMapSerializer {\n variant_name: variant.to_owned(),\n map: yaml_rust2::yaml::Hash::new(),\n })\n }\n}\n\npub struct SeqSerializer {\n vec: Vec,\n}\n\nimpl ser::SerializeSeq for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Array(self.vec))\n }\n}\n\nimpl ser::SerializeTuple for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\nimpl 
ser::SerializeTupleStruct for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\npub struct MapSerializer {\n map: yaml_rust2::yaml::Hash,\n next_key: Option,\n}\n\nimpl ser::SerializeMap for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.next_key = Some(key.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n let key = self.next_key.take().unwrap();\n self.map.insert(key, value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Hash(self.map))\n }\n}\n\nimpl ser::SerializeStruct for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeMap::serialize_entry(self, key, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeMap::end(self)\n }\n}\n\npub struct VariantMapSerializer {\n variant_name: String,\n map: yaml_rust2::yaml::Hash,\n}\n\nimpl ser::SerializeStructVariant for VariantMapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.map.insert(\n Yaml::String(key.to_owned()),\n value.serialize(YamlSerializer)?,\n );\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut outer_map = yaml_rust2::yaml::Hash::new();\n outer_map.insert(Yaml::String(self.variant_name), Yaml::Hash(self.map));\n Ok(Yaml::Hash(outer_map))\n }\n}\n\npub struct 
VariantSeqSerializer {\n variant_name: String,\n vec: Vec,\n}\n\nimpl ser::SerializeTupleVariant for VariantSeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut map = yaml_rust2::yaml::Hash::new();\n map.insert(Yaml::String(self.variant_name), Yaml::Array(self.vec));\n Ok(Yaml::Hash(map))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use serde::ser::Error as SerdeSerError;\n use serde::{Serialize, Serializer};\n use std::collections::BTreeMap;\n use yaml_rust2::yaml::{Hash, Yaml};\n\n fn assert_yaml_serialization(value: T, expected_yaml: Yaml) {\n let result = YamlSerializer::serialize(&value);\n println!(\"Serialized value: {result:?}, Expected value: {expected_yaml:?}\");\n\n assert!(\n result.is_ok(),\n \"Serialization failed when it should have succeeded. 
Error: {:?}\",\n result.err()\n );\n assert_eq!(\n result.unwrap(),\n expected_yaml,\n \"Serialized YAML did not match expected YAML.\"\n );\n }\n\n #[test]\n fn test_serialize_bool() {\n assert_yaml_serialization(true, Yaml::Boolean(true));\n assert_yaml_serialization(false, Yaml::Boolean(false));\n }\n\n #[test]\n fn test_serialize_integers() {\n assert_yaml_serialization(42i8, Yaml::Integer(42));\n assert_yaml_serialization(-100i16, Yaml::Integer(-100));\n assert_yaml_serialization(123456i32, Yaml::Integer(123456));\n assert_yaml_serialization(7890123456789i64, Yaml::Integer(7890123456789));\n assert_yaml_serialization(255u8, Yaml::Integer(255));\n assert_yaml_serialization(65535u16, Yaml::Integer(65535));\n assert_yaml_serialization(4000000000u32, Yaml::Integer(4000000000));\n // u64 is serialized as Yaml::Real(String) in your implementation\n assert_yaml_serialization(\n 18446744073709551615u64,\n Yaml::Real(\"18446744073709551615\".to_string()),\n );\n }\n\n #[test]\n fn test_serialize_floats() {\n assert_yaml_serialization(3.14f32, Yaml::Real(\"3.14\".to_string()));\n assert_yaml_serialization(-0.001f64, Yaml::Real(\"-0.001\".to_string()));\n assert_yaml_serialization(1.0e10f64, Yaml::Real(\"10000000000\".to_string()));\n }\n\n #[test]\n fn test_serialize_char() {\n assert_yaml_serialization('X', Yaml::String(\"X\".to_string()));\n assert_yaml_serialization('✨', Yaml::String(\"✨\".to_string()));\n }\n\n #[test]\n fn test_serialize_str_and_string() {\n assert_yaml_serialization(\"hello YAML\", Yaml::String(\"hello YAML\".to_string()));\n assert_yaml_serialization(\"\".to_string(), Yaml::String(\"\".to_string()));\n }\n\n #[test]\n fn test_serialize_raw_bytes() {\n let bytes_slice: &[u8] = &[0x48, 0x65, 0x6c, 0x6c, 0x6f]; // \"Hello\"\n let expected = Yaml::Array(vec![\n Yaml::Integer(72),\n Yaml::Integer(101),\n Yaml::Integer(108),\n Yaml::Integer(108),\n Yaml::Integer(111),\n ]);\n assert_yaml_serialization(bytes_slice, expected.clone());\n\n let bytes_vec: 
Vec = bytes_slice.to_vec();\n assert_yaml_serialization(bytes_vec, expected);\n\n let empty_bytes_slice: &[u8] = &[];\n assert_yaml_serialization(empty_bytes_slice, Yaml::Array(vec![]));\n }\n\n struct MyBytesWrapper<'a>(&'a [u8]);\n\n impl<'a> Serialize for MyBytesWrapper<'a> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: Serializer,\n {\n serializer.serialize_bytes(self.0)\n }\n }\n\n #[test]\n fn test_custom_wrapper_serializes_bytes_as_base64_string() {\n let data: &[u8] = &[72, 101, 108, 108, 111]; // \"Hello\"\n let wrapped_data = MyBytesWrapper(data);\n\n let base64_encoded = BASE64_STANDARD.encode(data);\n let expected_yaml = Yaml::String(base64_encoded);\n\n assert_yaml_serialization(wrapped_data, expected_yaml);\n\n let empty_data: &[u8] = &[];\n let wrapped_empty_data = MyBytesWrapper(empty_data);\n let empty_base64_encoded = BASE64_STANDARD.encode(empty_data);\n let expected_empty_yaml = Yaml::String(empty_base64_encoded);\n assert_yaml_serialization(wrapped_empty_data, expected_empty_yaml);\n }\n\n #[test]\n fn test_serialize_option() {\n let val_none: Option = None;\n assert_yaml_serialization(val_none, Yaml::Null);\n\n let val_some: Option = Some(\"has value\".to_string());\n assert_yaml_serialization(val_some, Yaml::String(\"has value\".to_string()));\n }\n\n #[test]\n fn test_serialize_unit() {\n assert_yaml_serialization((), Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_unit_struct() {\n #[derive(Serialize)]\n struct MyUnitStruct;\n\n assert_yaml_serialization(MyUnitStruct, Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_newtype_struct() {\n #[derive(Serialize)]\n struct MyNewtypeStruct(u64);\n\n assert_yaml_serialization(MyNewtypeStruct(12345u64), Yaml::Real(\"12345\".to_string()));\n }\n\n #[test]\n fn test_serialize_seq() {\n let empty_vec: Vec = vec![];\n assert_yaml_serialization(empty_vec, Yaml::Array(vec![]));\n\n let simple_vec = vec![10, 20, 30];\n assert_yaml_serialization(\n simple_vec,\n 
Yaml::Array(vec![\n Yaml::Integer(10),\n Yaml::Integer(20),\n Yaml::Integer(30),\n ]),\n );\n\n let string_vec = vec![\"a\".to_string(), \"b\".to_string()];\n assert_yaml_serialization(\n string_vec,\n Yaml::Array(vec![\n Yaml::String(\"a\".to_string()),\n Yaml::String(\"b\".to_string()),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple() {\n let tuple_val = (42i32, \"text\", false);\n assert_yaml_serialization(\n tuple_val,\n Yaml::Array(vec![\n Yaml::Integer(42),\n Yaml::String(\"text\".to_string()),\n Yaml::Boolean(false),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple_struct() {\n #[derive(Serialize)]\n struct MyTupleStruct(String, i64);\n\n assert_yaml_serialization(\n MyTupleStruct(\"value\".to_string(), -500),\n Yaml::Array(vec![Yaml::String(\"value\".to_string()), Yaml::Integer(-500)]),\n );\n }\n\n #[test]\n fn test_serialize_map() {\n let mut map = BTreeMap::new(); // BTreeMap for ordered keys, matching yaml::Hash\n map.insert(\"key1\".to_string(), 100);\n map.insert(\"key2\".to_string(), 200);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"key1\".to_string()), Yaml::Integer(100));\n expected_hash.insert(Yaml::String(\"key2\".to_string()), Yaml::Integer(200));\n assert_yaml_serialization(map, Yaml::Hash(expected_hash));\n\n let empty_map: BTreeMap = BTreeMap::new();\n assert_yaml_serialization(empty_map, Yaml::Hash(Hash::new()));\n }\n\n #[derive(Serialize)]\n struct SimpleStruct {\n id: u32,\n name: String,\n is_active: bool,\n }\n\n #[test]\n fn test_serialize_struct() {\n let s = SimpleStruct {\n id: 101,\n name: \"A Struct\".to_string(),\n is_active: true,\n };\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(101));\n expected_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"A Struct\".to_string()),\n );\n expected_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(true));\n assert_yaml_serialization(s, 
Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n struct NestedStruct {\n description: String,\n data: SimpleStruct,\n tags: Vec,\n }\n\n #[test]\n fn test_serialize_nested_struct() {\n let ns = NestedStruct {\n description: \"Contains another struct and a vec\".to_string(),\n data: SimpleStruct {\n id: 202,\n name: \"Inner\".to_string(),\n is_active: false,\n },\n tags: vec![\"nested\".to_string(), \"complex\".to_string()],\n };\n\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(202));\n inner_struct_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"Inner\".to_string()),\n );\n inner_struct_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(false));\n\n let tags_array = Yaml::Array(vec![\n Yaml::String(\"nested\".to_string()),\n Yaml::String(\"complex\".to_string()),\n ]);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"description\".to_string()),\n Yaml::String(\"Contains another struct and a vec\".to_string()),\n );\n expected_hash.insert(\n Yaml::String(\"data\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n expected_hash.insert(Yaml::String(\"tags\".to_string()), tags_array);\n\n assert_yaml_serialization(ns, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n enum MyEnum {\n Unit,\n Newtype(i32),\n Tuple(String, bool),\n Struct { field_a: u16, field_b: char },\n }\n\n #[test]\n fn test_serialize_enum_unit_variant() {\n assert_yaml_serialization(MyEnum::Unit, Yaml::String(\"Unit\".to_string()));\n }\n\n #[test]\n fn test_serialize_enum_newtype_variant() {\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"Newtype\".to_string()), Yaml::Integer(999));\n assert_yaml_serialization(MyEnum::Newtype(999), Yaml::Hash(expected_hash));\n }\n\n #[test]\n fn test_serialize_enum_tuple_variant() {\n let mut expected_hash = Hash::new();\n let inner_array = Yaml::Array(vec![\n 
Yaml::String(\"tuple_data\".to_string()),\n Yaml::Boolean(true),\n ]);\n expected_hash.insert(Yaml::String(\"Tuple\".to_string()), inner_array);\n assert_yaml_serialization(\n MyEnum::Tuple(\"tuple_data\".to_string(), true),\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_serialize_enum_struct_variant() {\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"field_a\".to_string()), Yaml::Integer(123));\n inner_struct_hash.insert(\n Yaml::String(\"field_b\".to_string()),\n Yaml::String(\"Z\".to_string()),\n );\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"Struct\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n assert_yaml_serialization(\n MyEnum::Struct {\n field_a: 123,\n field_b: 'Z',\n },\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_display() {\n let error = YamlSerializerError {\n msg: \"A test error message\".to_string(),\n };\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: A test error message\"\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_custom() {\n let error = YamlSerializerError::custom(\"Custom error detail\");\n assert_eq!(error.msg, \"Custom error detail\");\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: Custom error detail\"\n );\n let _err_trait_obj: Box = Box::new(error);\n }\n}\n"], ["/cocoindex/src/llm/voyage.rs", "use crate::prelude::*;\n\nuse crate::llm::{LlmEmbeddingClient, LlmEmbeddingRequest, LlmEmbeddingResponse};\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n // Current models\n \"voyage-3-large\" => 1024,\n \"voyage-3.5\" => 1024,\n \"voyage-3.5-lite\" => 1024,\n \"voyage-code-3\" => 1024,\n \"voyage-finance-2\" => 1024,\n \"voyage-law-2\" => 1024,\n \"voyage-code-2\" => 1536,\n\n // Legacy models\n \"voyage-3\" => 1024,\n \"voyage-3-lite\" => 512,\n \"voyage-multilingual-2\" => 1024,\n \"voyage-large-2-instruct\" => 1024,\n \"voyage-large-2\" => 1536,\n \"voyage-2\" => 1024,\n \"voyage-lite-02-instruct\" => 1024,\n \"voyage-02\" => 1024,\n \"voyage-01\" => 1024,\n \"voyage-lite-01\" => 1024,\n \"voyage-lite-01-instruct\" => 1024,\n};\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Voyage AI doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"VOYAGE_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"VOYAGE_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[derive(Deserialize)]\nstruct EmbeddingData {\n embedding: Vec,\n}\n\n#[derive(Deserialize)]\nstruct EmbedResponse {\n data: Vec,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = \"https://api.voyageai.com/v1/embeddings\";\n\n let mut payload = serde_json::json!({\n \"input\": request.text,\n \"model\": request.model,\n });\n\n if let Some(task_type) = request.task_type {\n payload[\"input_type\"] = serde_json::Value::String(task_type.into());\n }\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"Authorization\", format!(\"Bearer {}\", self.api_key))\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n\n if !resp.status().is_success() {\n bail!(\n \"Voyage AI API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n\n let embedding_resp: EmbedResponse 
= resp.json().await.context(\"Invalid JSON\")?;\n\n if embedding_resp.data.is_empty() {\n bail!(\"No embedding data in response\");\n }\n\n Ok(LlmEmbeddingResponse {\n embedding: embedding_resp.data[0].embedding.clone(),\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/llm/ollama.rs", "use crate::prelude::*;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient};\nuse schemars::schema::SchemaObject;\nuse serde_with::{base64::Base64, serde_as};\n\nfn get_embedding_dimension(model: &str) -> Option {\n match model.to_ascii_lowercase().as_str() {\n \"mxbai-embed-large\"\n | \"bge-m3\"\n | \"bge-large\"\n | \"snowflake-arctic-embed\"\n | \"snowflake-arctic-embed2\" => Some(1024),\n\n \"nomic-embed-text\"\n | \"paraphrase-multilingual\"\n | \"snowflake-arctic-embed:110m\"\n | \"snowflake-arctic-embed:137m\"\n | \"granite-embedding:278m\" => Some(768),\n\n \"all-minilm\"\n | \"snowflake-arctic-embed:22m\"\n | \"snowflake-arctic-embed:33m\"\n | \"granite-embedding\" => Some(384),\n\n _ => None,\n }\n}\n\npub struct Client {\n generate_url: String,\n embed_url: String,\n reqwest_client: reqwest::Client,\n}\n\n#[derive(Debug, Serialize)]\nenum OllamaFormat<'a> {\n #[serde(untagged)]\n JsonSchema(&'a SchemaObject),\n}\n\n#[serde_as]\n#[derive(Debug, Serialize)]\nstruct OllamaRequest<'a> {\n pub model: &'a str,\n pub prompt: &'a str,\n #[serde_as(as = \"Option>\")]\n pub images: Option>,\n pub format: Option>,\n pub system: Option<&'a str>,\n pub stream: Option,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaResponse {\n pub response: String,\n}\n\n#[derive(Debug, Serialize)]\nstruct OllamaEmbeddingRequest<'a> {\n pub model: &'a str,\n pub input: &'a str,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaEmbeddingResponse {\n pub embedding: Vec,\n}\n\nconst OLLAMA_DEFAULT_ADDRESS: &str = \"http://localhost:11434\";\n\nimpl Client {\n pub async fn new(address: 
Option) -> Result {\n let address = match &address {\n Some(addr) => addr.trim_end_matches('/'),\n None => OLLAMA_DEFAULT_ADDRESS,\n };\n Ok(Self {\n generate_url: format!(\"{address}/api/generate\"),\n embed_url: format!(\"{address}/api/embed\"),\n reqwest_client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let req = OllamaRequest {\n model: request.model,\n prompt: request.user_prompt.as_ref(),\n images: request.image.as_deref().map(|img| vec![img]),\n format: request.output_format.as_ref().map(\n |super::OutputFormat::JsonSchema { schema, .. }| {\n OllamaFormat::JsonSchema(schema.as_ref())\n },\n ),\n system: request.system_prompt.as_ref().map(|s| s.as_ref()),\n stream: Some(false),\n };\n let res = retryable::run(\n || {\n self.reqwest_client\n .post(self.generate_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !res.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n res.status(),\n res.text().await?\n );\n }\n let json: OllamaResponse = res.json().await?;\n Ok(super::LlmGenerateResponse {\n text: json.response,\n })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: true,\n extract_descriptions: true,\n top_level_must_be_object: false,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let req = OllamaEmbeddingRequest {\n model: request.model,\n input: request.text.as_ref(),\n };\n let resp = retryable::run(\n || {\n self.reqwest_client\n .post(self.embed_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n 
resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: OllamaEmbeddingResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/sdk.rs", "pub(crate) use crate::prelude::*;\n\nuse crate::builder::plan::AnalyzedFieldReference;\nuse crate::builder::plan::AnalyzedLocalFieldReference;\nuse std::collections::BTreeMap;\n\npub use super::factory_bases::*;\npub use super::interface::*;\npub use crate::base::schema::*;\npub use crate::base::spec::*;\npub use crate::base::value::*;\n\n// Disambiguate the ExportTargetBuildOutput type.\npub use super::factory_bases::TypedExportDataCollectionBuildOutput;\npub use super::registry::ExecutorFactoryRegistry;\n/// Defined for all types convertible to ValueType, to ease creation for ValueType in various operation factories.\npub trait TypeCore {\n fn into_type(self) -> ValueType;\n}\n\nimpl TypeCore for BasicValueType {\n fn into_type(self) -> ValueType {\n ValueType::Basic(self)\n }\n}\n\nimpl TypeCore for StructSchema {\n fn into_type(self) -> ValueType {\n ValueType::Struct(self)\n }\n}\n\nimpl TypeCore for TableSchema {\n fn into_type(self) -> ValueType {\n ValueType::Table(self)\n }\n}\n\npub fn make_output_type(value_type: Type) -> EnrichedValueType {\n EnrichedValueType {\n typ: value_type.into_type(),\n attrs: Default::default(),\n nullable: false,\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct EmptySpec {}\n\n#[macro_export]\nmacro_rules! 
fields_value {\n ($($field:expr), +) => {\n $crate::base::value::FieldValues { fields: std::vec![ $(($field).into()),+ ] }\n };\n}\n\npub struct SchemaBuilderFieldRef(AnalyzedLocalFieldReference);\n\nimpl SchemaBuilderFieldRef {\n pub fn to_field_ref(&self) -> AnalyzedFieldReference {\n AnalyzedFieldReference {\n local: self.0.clone(),\n scope_up_level: 0,\n }\n }\n}\npub struct StructSchemaBuilder<'a> {\n base_fields_idx: Vec,\n target: &'a mut StructSchema,\n}\n\nimpl<'a> StructSchemaBuilder<'a> {\n pub fn new(target: &'a mut StructSchema) -> Self {\n Self {\n base_fields_idx: Vec::new(),\n target,\n }\n }\n\n pub fn _set_description(&mut self, description: impl Into>) {\n self.target.description = Some(description.into());\n }\n\n pub fn add_field(&mut self, field_schema: FieldSchema) -> SchemaBuilderFieldRef {\n let current_idx = self.target.fields.len() as u32;\n Arc::make_mut(&mut self.target.fields).push(field_schema);\n let mut fields_idx = self.base_fields_idx.clone();\n fields_idx.push(current_idx);\n SchemaBuilderFieldRef(AnalyzedLocalFieldReference { fields_idx })\n }\n\n pub fn _add_struct_field(\n &mut self,\n name: impl Into,\n nullable: bool,\n attrs: Arc>,\n ) -> (StructSchemaBuilder<'_>, SchemaBuilderFieldRef) {\n let field_schema = FieldSchema::new(\n name.into(),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(Vec::new()),\n description: None,\n }),\n nullable,\n attrs,\n },\n );\n let local_ref = self.add_field(field_schema);\n let struct_schema = match &mut Arc::make_mut(&mut self.target.fields)\n .last_mut()\n .unwrap()\n .value_type\n .typ\n {\n ValueType::Struct(s) => s,\n _ => unreachable!(),\n };\n (\n StructSchemaBuilder {\n base_fields_idx: local_ref.0.fields_idx.clone(),\n target: struct_schema,\n },\n local_ref,\n )\n }\n}\n"], ["/cocoindex/src/llm/openai.rs", "use crate::api_bail;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient, detect_image_mime_type};\nuse anyhow::Result;\nuse 
async_openai::{\n Client as OpenAIClient,\n config::OpenAIConfig,\n types::{\n ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartImage,\n ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage,\n ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,\n ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,\n CreateChatCompletionRequest, CreateEmbeddingRequest, EmbeddingInput, ImageDetail,\n ResponseFormat, ResponseFormatJsonSchema,\n },\n};\nuse async_trait::async_trait;\nuse base64::prelude::*;\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! {\n \"text-embedding-3-small\" => 1536,\n \"text-embedding-3-large\" => 3072,\n \"text-embedding-ada-002\" => 1536,\n};\n\npub struct Client {\n client: async_openai::Client,\n}\n\nimpl Client {\n pub(crate) fn from_parts(client: async_openai::Client) -> Self {\n Self { client }\n }\n\n pub fn new(address: Option) -> Result {\n if let Some(address) = address {\n api_bail!(\"OpenAI doesn't support custom API address: {address}\");\n }\n // Verify API key is set\n if std::env::var(\"OPENAI_API_KEY\").is_err() {\n api_bail!(\"OPENAI_API_KEY environment variable must be set\");\n }\n Ok(Self {\n // OpenAI client will use OPENAI_API_KEY env variable by default\n client: OpenAIClient::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let mut messages = Vec::new();\n\n // Add system prompt if provided\n if let Some(system) = request.system_prompt {\n messages.push(ChatCompletionRequestMessage::System(\n ChatCompletionRequestSystemMessage {\n content: ChatCompletionRequestSystemMessageContent::Text(system.into_owned()),\n ..Default::default()\n },\n ));\n }\n\n // Add user message\n let user_message_content = match request.image {\n Some(img_bytes) => {\n let base64_image = 
BASE64_STANDARD.encode(img_bytes.as_ref());\n let mime_type = detect_image_mime_type(img_bytes.as_ref())?;\n let image_url = format!(\"data:{mime_type};base64,{base64_image}\");\n ChatCompletionRequestUserMessageContent::Array(vec![\n ChatCompletionRequestUserMessageContentPart::Text(\n ChatCompletionRequestMessageContentPartText {\n text: request.user_prompt.into_owned(),\n },\n ),\n ChatCompletionRequestUserMessageContentPart::ImageUrl(\n ChatCompletionRequestMessageContentPartImage {\n image_url: async_openai::types::ImageUrl {\n url: image_url,\n detail: Some(ImageDetail::Auto),\n },\n },\n ),\n ])\n }\n None => ChatCompletionRequestUserMessageContent::Text(request.user_prompt.into_owned()),\n };\n messages.push(ChatCompletionRequestMessage::User(\n ChatCompletionRequestUserMessage {\n content: user_message_content,\n ..Default::default()\n },\n ));\n\n // Create the chat completion request\n let request = CreateChatCompletionRequest {\n model: request.model.to_string(),\n messages,\n response_format: match request.output_format {\n Some(super::OutputFormat::JsonSchema { name, schema }) => {\n Some(ResponseFormat::JsonSchema {\n json_schema: ResponseFormatJsonSchema {\n name: name.into_owned(),\n description: None,\n schema: Some(serde_json::to_value(&schema)?),\n strict: Some(true),\n },\n })\n }\n None => None,\n },\n ..Default::default()\n };\n\n // Send request and get response\n let response = self.client.chat().create(request).await?;\n\n // Extract the response text from the first choice\n let text = response\n .choices\n .into_iter()\n .next()\n .and_then(|choice| choice.message.content)\n .ok_or_else(|| anyhow::anyhow!(\"No response from OpenAI\"))?;\n\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: true,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl 
LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let response = self\n .client\n .embeddings()\n .create(CreateEmbeddingRequest {\n model: request.model.to_string(),\n input: EmbeddingInput::String(request.text.to_string()),\n dimensions: request.output_dimension,\n ..Default::default()\n })\n .await?;\n Ok(super::LlmEmbeddingResponse {\n embedding: response\n .data\n .into_iter()\n .next()\n .ok_or_else(|| anyhow::anyhow!(\"No embedding returned from OpenAI\"))?\n .embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/utils/concur_control.rs", "use crate::prelude::*;\n\nuse tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore};\n\nstruct WeightedSemaphore {\n downscale_factor: u8,\n downscaled_quota: u32,\n sem: Arc,\n}\n\nimpl WeightedSemaphore {\n pub fn new(quota: usize) -> Self {\n let mut downscale_factor = 0;\n let mut downscaled_quota = quota;\n while downscaled_quota > u32::MAX as usize {\n downscaled_quota >>= 1;\n downscale_factor += 1;\n }\n let sem = Arc::new(Semaphore::new(downscaled_quota));\n Self {\n downscaled_quota: downscaled_quota as u32,\n downscale_factor,\n sem,\n }\n }\n\n async fn acquire_reservation(&self) -> Result {\n self.sem.clone().acquire_owned().await\n }\n\n async fn acquire(\n &self,\n weight: usize,\n reserved: bool,\n ) -> Result, AcquireError> {\n let downscaled_weight = (weight >> self.downscale_factor) as u32;\n let capped_weight = downscaled_weight.min(self.downscaled_quota);\n let reserved_weight = if reserved { 1 } else { 0 };\n if reserved_weight >= capped_weight {\n return Ok(None);\n }\n Ok(Some(\n self.sem\n .clone()\n .acquire_many_owned(capped_weight - reserved_weight)\n .await?,\n ))\n }\n}\n\npub struct Options {\n pub max_inflight_rows: Option,\n pub max_inflight_bytes: Option,\n}\n\npub struct 
ConcurrencyControllerPermit {\n _inflight_count_permit: Option,\n _inflight_bytes_permit: Option,\n}\n\npub struct ConcurrencyController {\n inflight_count_sem: Option>,\n inflight_bytes_sem: Option,\n}\n\npub static BYTES_UNKNOWN_YET: Option usize> = None;\n\nimpl ConcurrencyController {\n pub fn new(exec_options: &Options) -> Self {\n Self {\n inflight_count_sem: exec_options\n .max_inflight_rows\n .map(|max| Arc::new(Semaphore::new(max))),\n inflight_bytes_sem: exec_options.max_inflight_bytes.map(WeightedSemaphore::new),\n }\n }\n\n /// If `bytes_fn` is `None`, it means the number of bytes is not known yet.\n /// The controller will reserve a minimum number of bytes.\n /// The caller should call `acquire_bytes_with_reservation` with the actual number of bytes later.\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let inflight_count_permit = if let Some(sem) = &self.inflight_count_sem {\n Some(sem.clone().acquire_owned().await?)\n } else {\n None\n };\n let inflight_bytes_permit = if let Some(sem) = &self.inflight_bytes_sem {\n if let Some(bytes_fn) = bytes_fn {\n sem.acquire(bytes_fn(), false).await?\n } else {\n Some(sem.acquire_reservation().await?)\n }\n } else {\n None\n };\n Ok(ConcurrencyControllerPermit {\n _inflight_count_permit: inflight_count_permit,\n _inflight_bytes_permit: inflight_bytes_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result, AcquireError> {\n if let Some(sem) = &self.inflight_bytes_sem {\n sem.acquire(bytes_fn(), true).await\n } else {\n Ok(None)\n }\n }\n}\n\npub struct CombinedConcurrencyControllerPermit {\n _permit: ConcurrencyControllerPermit,\n _global_permit: ConcurrencyControllerPermit,\n}\n\npub struct CombinedConcurrencyController {\n controller: ConcurrencyController,\n global_controller: Arc,\n needs_num_bytes: bool,\n}\n\nimpl CombinedConcurrencyController {\n pub fn new(exec_options: &Options, global_controller: Arc) -> 
Self {\n Self {\n controller: ConcurrencyController::new(exec_options),\n needs_num_bytes: exec_options.max_inflight_bytes.is_some()\n || global_controller.inflight_bytes_sem.is_some(),\n global_controller,\n }\n }\n\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let num_bytes_fn = if let Some(bytes_fn) = bytes_fn\n && self.needs_num_bytes\n {\n let num_bytes = bytes_fn();\n Some(move || num_bytes)\n } else {\n None\n };\n\n let permit = self.controller.acquire(num_bytes_fn).await?;\n let global_permit = self.global_controller.acquire(num_bytes_fn).await?;\n Ok(CombinedConcurrencyControllerPermit {\n _permit: permit,\n _global_permit: global_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result<(Option, Option), AcquireError> {\n let num_bytes = bytes_fn();\n let permit = self\n .controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n let global_permit = self\n .global_controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n Ok((permit, global_permit))\n }\n}\n"], ["/cocoindex/src/server.rs", "use crate::prelude::*;\n\nuse crate::{lib_context::LibContext, service};\nuse axum::{Router, routing};\nuse tower::ServiceBuilder;\nuse tower_http::{\n cors::{AllowOrigin, CorsLayer},\n trace::TraceLayer,\n};\n\n#[derive(Deserialize, Debug)]\npub struct ServerSettings {\n pub address: String,\n #[serde(default)]\n pub cors_origins: Vec,\n}\n\n/// Initialize the server and return a future that will actually handle requests.\npub async fn init_server(\n lib_context: Arc,\n settings: ServerSettings,\n) -> Result> {\n let mut cors = CorsLayer::default();\n if !settings.cors_origins.is_empty() {\n let origins: Vec<_> = settings\n .cors_origins\n .iter()\n .map(|origin| origin.parse())\n .collect::>()?;\n cors = cors\n .allow_origin(AllowOrigin::list(origins))\n .allow_methods([\n axum::http::Method::GET,\n axum::http::Method::POST,\n 
axum::http::Method::DELETE,\n ])\n .allow_headers([axum::http::header::CONTENT_TYPE]);\n }\n let app = Router::new()\n .route(\n \"/cocoindex\",\n routing::get(|| async { \"CocoIndex is running!\" }),\n )\n .nest(\n \"/cocoindex/api\",\n Router::new()\n .route(\"/flows\", routing::get(service::flows::list_flows))\n .route(\n \"/flows/{flowInstName}\",\n routing::get(service::flows::get_flow),\n )\n .route(\n \"/flows/{flowInstName}/schema\",\n routing::get(service::flows::get_flow_schema),\n )\n .route(\n \"/flows/{flowInstName}/keys\",\n routing::get(service::flows::get_keys),\n )\n .route(\n \"/flows/{flowInstName}/data\",\n routing::get(service::flows::evaluate_data),\n )\n .route(\n \"/flows/{flowInstName}/rowStatus\",\n routing::get(service::flows::get_row_indexing_status),\n )\n .route(\n \"/flows/{flowInstName}/update\",\n routing::post(service::flows::update),\n )\n .layer(\n ServiceBuilder::new()\n .layer(TraceLayer::new_for_http())\n .layer(cors),\n )\n .with_state(lib_context.clone()),\n );\n\n let listener = tokio::net::TcpListener::bind(&settings.address)\n .await\n .context(format!(\"Failed to bind to address: {}\", settings.address))?;\n\n println!(\n \"Server running at http://{}/cocoindex\",\n listener.local_addr()?\n );\n let serve_fut = async { axum::serve(listener, app).await.unwrap() };\n Ok(serve_fut.boxed())\n}\n"], ["/cocoindex/src/utils/retryable.rs", "use log::trace;\nuse std::{future::Future, time::Duration};\n\npub trait IsRetryable {\n fn is_retryable(&self) -> bool;\n}\n\npub struct Error {\n error: anyhow::Error,\n is_retryable: bool,\n}\n\nimpl std::fmt::Display for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.error, f)\n }\n}\n\nimpl std::fmt::Debug for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Debug::fmt(&self.error, f)\n }\n}\n\nimpl IsRetryable for Error {\n fn is_retryable(&self) -> bool {\n self.is_retryable\n 
}\n}\n\nimpl IsRetryable for reqwest::Error {\n fn is_retryable(&self) -> bool {\n self.status() == Some(reqwest::StatusCode::TOO_MANY_REQUESTS)\n }\n}\n\nimpl Error {\n pub fn always_retryable(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: true,\n }\n }\n}\n\nimpl From for Error {\n fn from(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: false,\n }\n }\n}\n\nimpl From for anyhow::Error {\n fn from(val: Error) -> Self {\n val.error\n }\n}\n\nimpl From for Error {\n fn from(error: E) -> Self {\n Self {\n is_retryable: error.is_retryable(),\n error: anyhow::Error::new(error),\n }\n }\n}\n\npub type Result = std::result::Result;\n\n#[allow(non_snake_case)]\npub fn Ok(value: T) -> Result {\n Result::Ok(value)\n}\n\npub struct RetryOptions {\n pub max_retries: Option,\n pub initial_backoff: Duration,\n pub max_backoff: Duration,\n}\n\nimpl Default for RetryOptions {\n fn default() -> Self {\n Self {\n max_retries: Some(10),\n initial_backoff: Duration::from_millis(100),\n max_backoff: Duration::from_secs(10),\n }\n }\n}\n\npub static HEAVY_LOADED_OPTIONS: RetryOptions = RetryOptions {\n max_retries: Some(10),\n initial_backoff: Duration::from_secs(1),\n max_backoff: Duration::from_secs(60),\n};\n\npub async fn run<\n Ok,\n Err: std::fmt::Display + IsRetryable,\n Fut: Future>,\n F: Fn() -> Fut,\n>(\n f: F,\n options: &RetryOptions,\n) -> Result {\n let mut retries = 0;\n let mut backoff = options.initial_backoff;\n\n loop {\n match f().await {\n Result::Ok(result) => return Result::Ok(result),\n Result::Err(err) => {\n if !err.is_retryable()\n || options\n .max_retries\n .is_some_and(|max_retries| retries >= max_retries)\n {\n return Result::Err(err);\n }\n retries += 1;\n trace!(\n \"Will retry #{} in {}ms for error: {}\",\n retries,\n backoff.as_millis(),\n err\n );\n tokio::time::sleep(backoff).await;\n if backoff < options.max_backoff {\n backoff = std::cmp::min(\n Duration::from_micros(\n (backoff.as_micros() * 
rand::random_range(1618..=2000) / 1000) as u64,\n ),\n options.max_backoff,\n );\n }\n }\n }\n }\n}\n"], ["/cocoindex/src/ops/functions/parse_json.rs", "use crate::ops::sdk::*;\nuse anyhow::Result;\nuse std::collections::HashMap;\nuse std::sync::{Arc, LazyLock};\nuse unicase::UniCase;\n\npub struct Args {\n text: ResolvedOpArg,\n language: Option,\n}\n\ntype ParseFn = fn(&str) -> Result;\nstruct LanguageConfig {\n parse_fn: ParseFn,\n}\n\nfn add_language(\n output: &mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n parse_fn: ParseFn,\n) {\n let lang_config = Arc::new(LanguageConfig { parse_fn });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), lang_config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nfn parse_json(text: &str) -> Result {\n Ok(serde_json::from_str(text)?)\n}\n\nstatic PARSE_FN_BY_LANG: LazyLock, Arc>> =\n LazyLock::new(|| {\n let mut map = HashMap::new();\n add_language(&mut map, \"json\", [\".json\"], parse_json);\n map\n });\n\nstruct Executor {\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let lang_config = {\n let language = self.args.language.value(&input)?;\n language\n .optional()\n .map(|v| anyhow::Ok(v.as_str()?.as_ref()))\n .transpose()?\n .and_then(|lang| PARSE_FN_BY_LANG.get(&UniCase::new(lang)))\n };\n let parse_fn = lang_config.map(|c| c.parse_fn).unwrap_or(parse_json);\n let parsed_value = parse_fn(text)?;\n Ok(value::Value::Basic(value::BasicValue::Json(Arc::new(\n parsed_value,\n ))))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = EmptySpec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ParseJson\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a EmptySpec,\n args_resolver: &mut OpArgsResolver<'a>,\n 
_context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let output_schema = make_output_type(BasicValueType::Json);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n _spec: EmptySpec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { args }))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n use serde_json::json;\n\n #[tokio::test]\n async fn test_parse_json() {\n let spec = EmptySpec {};\n\n let factory = Arc::new(Factory);\n let json_string_content = r#\"{\"city\": \"Magdeburg\"}\"#;\n let lang_value: Value = \"json\".to_string().into();\n\n let input_args_values = vec![json_string_content.to_string().into(), lang_value.clone()];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Json(arc_json_value)) => {\n let expected_json = json!({\"city\": \"Magdeburg\"});\n assert_eq!(\n *arc_json_value, expected_json,\n \"Parsed JSON value mismatch with specified language\"\n );\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Json), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/ops/functions/test_utils.rs", "use crate::builder::plan::{\n AnalyzedFieldReference, AnalyzedLocalFieldReference, AnalyzedValueMapping,\n};\nuse crate::ops::sdk::{\n AuthRegistry, BasicValueType, EnrichedValueType, FlowInstanceContext, 
OpArgSchema,\n OpArgsResolver, SimpleFunctionExecutor, SimpleFunctionFactoryBase, Value, make_output_type,\n};\nuse anyhow::Result;\nuse serde::de::DeserializeOwned;\nuse std::sync::Arc;\n\n// This function builds an argument schema for a flow function.\npub fn build_arg_schema(\n name: &str,\n value_type: BasicValueType,\n) -> (Option<&str>, EnrichedValueType) {\n (Some(name), make_output_type(value_type))\n}\n\n// This function tests a flow function by providing a spec, input argument schemas, and values.\npub async fn test_flow_function(\n factory: Arc,\n spec: S,\n input_arg_schemas: Vec<(Option<&str>, EnrichedValueType)>,\n input_arg_values: Vec,\n) -> Result\nwhere\n S: DeserializeOwned + Send + Sync + 'static,\n R: Send + Sync + 'static,\n F: SimpleFunctionFactoryBase + ?Sized,\n{\n // 1. Construct OpArgSchema\n let op_arg_schemas: Vec = input_arg_schemas\n .into_iter()\n .enumerate()\n .map(|(idx, (name, value_type))| OpArgSchema {\n name: name.map_or(crate::base::spec::OpArgName(None), |n| {\n crate::base::spec::OpArgName(Some(n.to_string()))\n }),\n value_type,\n analyzed_value: AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: AnalyzedLocalFieldReference {\n fields_idx: vec![idx as u32],\n },\n scope_up_level: 0,\n }),\n })\n .collect();\n\n // 2. Resolve Schema & Args\n let mut args_resolver = OpArgsResolver::new(&op_arg_schemas)?;\n let context = Arc::new(FlowInstanceContext {\n flow_instance_name: \"test_flow_function\".to_string(),\n auth_registry: Arc::new(AuthRegistry::default()),\n py_exec_ctx: None,\n });\n\n let (resolved_args_from_schema, _output_schema): (R, EnrichedValueType) = factory\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n\n args_resolver.done()?;\n\n // 3. Build Executor\n let executor: Box = factory\n .build_executor(spec, resolved_args_from_schema, Arc::clone(&context))\n .await?;\n\n // 4. 
Evaluate\n let result = executor.evaluate(input_arg_values).await?;\n\n Ok(result)\n}\n"], ["/cocoindex/src/builder/analyzed_flow.rs", "use crate::{ops::interface::FlowInstanceContext, prelude::*};\n\nuse super::{analyzer, plan};\nuse crate::service::error::{SharedError, SharedResultExt, shared_ok};\n\npub struct AnalyzedFlow {\n pub flow_instance: spec::FlowInstanceSpec,\n pub data_schema: schema::FlowSchema,\n pub setup_state: exec_ctx::AnalyzedSetupState,\n\n pub flow_instance_ctx: Arc,\n\n /// It's None if the flow is not up to date\n pub execution_plan: Shared, SharedError>>>,\n}\n\nimpl AnalyzedFlow {\n pub async fn from_flow_instance(\n flow_instance: crate::base::spec::FlowInstanceSpec,\n flow_instance_ctx: Arc,\n ) -> Result {\n let (data_schema, setup_state, execution_plan_fut) =\n analyzer::analyze_flow(&flow_instance, flow_instance_ctx.clone()).await?;\n let execution_plan = async move {\n shared_ok(Arc::new(\n execution_plan_fut.await.map_err(SharedError::new)?,\n ))\n }\n .boxed()\n .shared();\n let result = Self {\n flow_instance,\n data_schema,\n setup_state,\n flow_instance_ctx,\n execution_plan,\n };\n Ok(result)\n }\n\n pub async fn get_execution_plan(&self) -> Result> {\n let execution_plan = self.execution_plan.clone().await.std_result()?;\n Ok(execution_plan)\n }\n}\n\npub struct AnalyzedTransientFlow {\n pub transient_flow_instance: spec::TransientFlowSpec,\n pub data_schema: schema::FlowSchema,\n pub execution_plan: plan::TransientExecutionPlan,\n pub output_type: schema::EnrichedValueType,\n}\n\nimpl AnalyzedTransientFlow {\n pub async fn from_transient_flow(\n transient_flow: spec::TransientFlowSpec,\n py_exec_ctx: Option,\n ) -> Result {\n let ctx = analyzer::build_flow_instance_context(&transient_flow.name, py_exec_ctx);\n let (output_type, data_schema, execution_plan_fut) =\n analyzer::analyze_transient_flow(&transient_flow, ctx).await?;\n Ok(Self {\n transient_flow_instance: transient_flow,\n data_schema,\n execution_plan: 
execution_plan_fut.await?,\n output_type,\n })\n }\n}\n"], ["/cocoindex/src/utils/fingerprint.rs", "use anyhow::bail;\nuse base64::prelude::*;\nuse blake2::digest::typenum;\nuse blake2::{Blake2b, Digest};\nuse serde::Deserialize;\nuse serde::ser::{\n Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,\n SerializeTupleStruct, SerializeTupleVariant, Serializer,\n};\n\n#[derive(Debug)]\npub struct FingerprinterError {\n msg: String,\n}\n\nimpl std::fmt::Display for FingerprinterError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"FingerprinterError: {}\", self.msg)\n }\n}\nimpl std::error::Error for FingerprinterError {}\nimpl serde::ser::Error for FingerprinterError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n FingerprinterError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct Fingerprint(pub [u8; 16]);\n\nimpl Fingerprint {\n pub fn to_base64(self) -> String {\n BASE64_STANDARD.encode(self.0)\n }\n\n pub fn from_base64(s: &str) -> anyhow::Result {\n let bytes = match s.len() {\n 24 => BASE64_STANDARD.decode(s)?,\n\n // For backward compatibility. 
Some old version (<= v0.1.2) is using hex encoding.\n 32 => hex::decode(s)?,\n _ => bail!(\"Encoded fingerprint length is unexpected: {}\", s.len()),\n };\n match bytes.try_into() {\n Ok(bytes) => Ok(Fingerprint(bytes)),\n Err(e) => bail!(\"Fingerprint bytes length is unexpected: {}\", e.len()),\n }\n }\n}\n\nimpl Serialize for Fingerprint {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n serializer.serialize_str(&self.to_base64())\n }\n}\n\nimpl<'de> Deserialize<'de> for Fingerprint {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s = String::deserialize(deserializer)?;\n Self::from_base64(&s).map_err(serde::de::Error::custom)\n }\n}\n#[derive(Clone, Default)]\npub struct Fingerprinter {\n hasher: Blake2b,\n}\n\nimpl Fingerprinter {\n pub fn into_fingerprint(self) -> Fingerprint {\n Fingerprint(self.hasher.finalize().into())\n }\n\n pub fn with(self, value: &S) -> Result {\n let mut fingerprinter = self;\n value.serialize(&mut fingerprinter)?;\n Ok(fingerprinter)\n }\n\n pub fn write(&mut self, value: &S) -> Result<(), FingerprinterError> {\n value.serialize(self)\n }\n\n fn write_type_tag(&mut self, tag: &str) {\n self.hasher.update(tag.as_bytes());\n self.hasher.update(b\";\");\n }\n\n fn write_end_tag(&mut self) {\n self.hasher.update(b\".\");\n }\n\n fn write_varlen_bytes(&mut self, bytes: &[u8]) {\n self.write_usize(bytes.len());\n self.hasher.update(bytes);\n }\n\n fn write_usize(&mut self, value: usize) {\n self.hasher.update((value as u32).to_le_bytes());\n }\n}\n\nimpl Serializer for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n type SerializeSeq = Self;\n type SerializeTuple = Self;\n type SerializeTupleStruct = Self;\n type SerializeTupleVariant = Self;\n type SerializeMap = Self;\n type SerializeStruct = Self;\n type SerializeStructVariant = Self;\n\n fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {\n self.write_type_tag(if v { 
\"t\" } else { \"f\" });\n Ok(())\n }\n\n fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {\n self.write_type_tag(\"i1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {\n self.write_type_tag(\"i2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {\n self.write_type_tag(\"i4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {\n self.write_type_tag(\"i8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {\n self.write_type_tag(\"u1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {\n self.write_type_tag(\"u2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {\n self.write_type_tag(\"u4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {\n self.write_type_tag(\"u8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {\n self.write_type_tag(\"f4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {\n self.write_type_tag(\"f8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_char(self, v: char) -> Result<(), Self::Error> {\n self.write_type_tag(\"c\");\n self.write_usize(v as usize);\n Ok(())\n }\n\n fn serialize_str(self, v: &str) -> Result<(), Self::Error> {\n self.write_type_tag(\"s\");\n self.write_varlen_bytes(v.as_bytes());\n Ok(())\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result<(), Self::Error> {\n self.write_type_tag(\"b\");\n self.write_varlen_bytes(v);\n Ok(())\n }\n\n fn serialize_none(self) -> Result<(), Self::Error> {\n 
self.write_type_tag(\"\");\n Ok(())\n }\n\n fn serialize_some(self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"()\");\n Ok(())\n }\n\n fn serialize_unit_struct(self, name: &'static str) -> Result<(), Self::Error> {\n self.write_type_tag(\"US\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(())\n }\n\n fn serialize_unit_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result<(), Self::Error> {\n self.write_type_tag(\"UV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(())\n }\n\n fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NS\");\n self.write_varlen_bytes(name.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_seq(self, _len: Option) -> Result {\n self.write_type_tag(\"L\");\n Ok(self)\n }\n\n fn serialize_tuple(self, _len: usize) -> Result {\n self.write_type_tag(\"T\");\n Ok(self)\n }\n\n fn serialize_tuple_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TS\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_tuple_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n\n fn serialize_map(self, _len: Option) 
-> Result {\n self.write_type_tag(\"M\");\n Ok(self)\n }\n\n fn serialize_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"S\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_struct_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"SV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n}\n\nimpl SerializeSeq for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTuple for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeMap for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), 
Self::Error>\n where\n T: ?Sized + Serialize,\n {\n key.serialize(&mut **self)\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStructVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/auth_registry.rs", "use std::collections::hash_map;\n\nuse crate::prelude::*;\n\npub struct AuthRegistry {\n entries: RwLock>,\n}\n\nimpl Default for AuthRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl AuthRegistry {\n pub fn new() -> Self {\n Self {\n entries: RwLock::new(HashMap::new()),\n }\n }\n\n pub fn add(&self, key: String, value: serde_json::Value) -> Result<()> {\n let mut entries = self.entries.write().unwrap();\n match entries.entry(key) {\n hash_map::Entry::Occupied(entry) => {\n api_bail!(\"Auth entry already exists: {}\", entry.key());\n }\n hash_map::Entry::Vacant(entry) => {\n entry.insert(value);\n }\n }\n Ok(())\n }\n\n pub fn get(&self, entry_ref: &spec::AuthEntryReference) -> Result {\n let entries = self.entries.read().unwrap();\n match entries.get(&entry_ref.key) 
{\n Some(value) => Ok(serde_json::from_value(value.clone())?),\n None => api_bail!(\n \"Auth entry `{key}` not found.\\n\\\n Hint: If you're not referencing `{key}` in your flow, it will likely be caused by a previously persisted target using it. \\\n You need to bring back the definition for the auth entry `{key}`, so that CocoIndex will be able to do a cleanup in the next `setup` run. \\\n See https://cocoindex.io/docs/core/flow_def#auth-registry for more details.\",\n key = entry_ref.key\n ),\n }\n }\n}\n"], ["/cocoindex/src/service/error.rs", "use crate::prelude::*;\n\nuse axum::{\n Json,\n http::StatusCode,\n response::{IntoResponse, Response},\n};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse std::{\n error::Error,\n fmt::{Debug, Display},\n};\n\n#[derive(Debug)]\npub struct ApiError {\n pub err: anyhow::Error,\n pub status_code: StatusCode,\n}\n\nimpl ApiError {\n pub fn new(message: &str, status_code: StatusCode) -> Self {\n Self {\n err: anyhow!(\"{}\", message),\n status_code,\n }\n }\n}\n\nimpl Display for ApiError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl Error for ApiError {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.err.source()\n }\n}\n\n#[derive(Serialize)]\nstruct ErrorResponse {\n error: String,\n}\n\nimpl IntoResponse for ApiError {\n fn into_response(self) -> Response {\n debug!(\"Internal server error:\\n{:?}\", self.err);\n let error_response = ErrorResponse {\n error: self.err.to_string(),\n };\n (self.status_code, Json(error_response)).into_response()\n }\n}\n\nimpl From for ApiError {\n fn from(err: anyhow::Error) -> ApiError {\n if err.is::() {\n return err.downcast::().unwrap();\n }\n Self {\n err,\n status_code: StatusCode::INTERNAL_SERVER_ERROR,\n }\n }\n}\n\nimpl From for PyErr {\n fn from(val: ApiError) -> Self {\n PyException::new_err(val.err.to_string())\n }\n}\n\n#[derive(Clone)]\npub struct SharedError {\n pub err: Arc,\n}\n\nimpl 
SharedError {\n pub fn new(err: anyhow::Error) -> Self {\n Self { err: Arc::new(err) }\n }\n}\nimpl Debug for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.err, f)\n }\n}\n\nimpl Display for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl From for SharedError {\n fn from(err: E) -> Self {\n Self {\n err: Arc::new(anyhow::Error::from(err)),\n }\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + Send + Sync + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\npub fn shared_ok(value: T) -> Result {\n Ok(value)\n}\n\npub type SharedResult = Result;\n\npub struct SharedErrorWrapper(SharedError);\n\nimpl Display for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Debug for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Error for SharedErrorWrapper {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.0.err.as_ref().source()\n }\n}\n\npub trait SharedResultExt {\n fn std_result(self) -> Result;\n}\n\nimpl SharedResultExt for Result {\n fn std_result(self) -> Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err)),\n }\n }\n}\n\npub trait SharedResultExtRef<'a, T> {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper>;\n}\n\nimpl<'a, T> SharedResultExtRef<'a, T> for &'a Result {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper> {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err.clone())),\n }\n }\n}\n\npub fn invariance_violation() -> anyhow::Error {\n anyhow::anyhow!(\"Invariance violation\")\n}\n\n#[macro_export]\nmacro_rules! 
api_bail {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n return Err($crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST).into())\n };\n}\n\n#[macro_export]\nmacro_rules! api_error {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n $crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST)\n };\n}\n"], ["/cocoindex/src/builder/plan.rs", "use crate::prelude::*;\n\nuse crate::ops::interface::*;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalFieldReference {\n /// Must be non-empty.\n pub fields_idx: Vec,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedFieldReference {\n pub local: AnalyzedLocalFieldReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalCollectorReference {\n pub collector_idx: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedCollectorReference {\n pub local: AnalyzedLocalCollectorReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct AnalyzedStructMapping {\n pub fields: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\")]\npub enum AnalyzedValueMapping {\n Constant { value: value::Value },\n Field(AnalyzedFieldReference),\n Struct(AnalyzedStructMapping),\n}\n\n#[derive(Debug, Clone)]\npub struct AnalyzedOpOutput {\n pub field_idx: u32,\n}\n\npub struct AnalyzedImportOp {\n pub name: String,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n pub primary_key_type: schema::ValueType,\n pub refresh_options: 
spec::SourceRefreshOptions,\n\n pub concurrency_controller: concur_control::CombinedConcurrencyController,\n}\n\npub struct AnalyzedFunctionExecInfo {\n pub enable_cache: bool,\n pub behavior_version: Option,\n\n /// Fingerprinter of the function's behavior.\n pub fingerprinter: Fingerprinter,\n /// To deserialize cached value.\n pub output_type: schema::ValueType,\n}\n\npub struct AnalyzedTransformOp {\n pub name: String,\n pub inputs: Vec,\n pub function_exec_info: AnalyzedFunctionExecInfo,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n}\n\npub struct AnalyzedForEachOp {\n pub name: String,\n pub local_field_ref: AnalyzedLocalFieldReference,\n pub op_scope: AnalyzedOpScope,\n pub concurrency_controller: concur_control::ConcurrencyController,\n}\n\npub struct AnalyzedCollectOp {\n pub name: String,\n pub has_auto_uuid_field: bool,\n pub input: AnalyzedStructMapping,\n pub collector_ref: AnalyzedCollectorReference,\n /// Fingerprinter of the collector's schema. Used to decide when to reuse auto-generated UUIDs.\n pub fingerprinter: Fingerprinter,\n}\n\npub enum AnalyzedPrimaryKeyDef {\n Fields(Vec),\n}\n\npub struct AnalyzedExportOp {\n pub name: String,\n pub input: AnalyzedLocalCollectorReference,\n pub export_target_factory: Arc,\n pub export_context: Arc,\n pub primary_key_def: AnalyzedPrimaryKeyDef,\n pub primary_key_type: schema::ValueType,\n /// idx for value fields - excluding the primary key field.\n pub value_fields: Vec,\n /// If true, value is never changed on the same primary key.\n /// This is guaranteed if the primary key contains auto-generated UUIDs.\n pub value_stable: bool,\n}\n\npub struct AnalyzedExportTargetOpGroup {\n pub target_factory: Arc,\n pub op_idx: Vec,\n}\n\npub enum AnalyzedReactiveOp {\n Transform(AnalyzedTransformOp),\n ForEach(AnalyzedForEachOp),\n Collect(AnalyzedCollectOp),\n}\n\npub struct AnalyzedOpScope {\n pub reactive_ops: Vec,\n pub collector_len: usize,\n}\n\npub struct ExecutionPlan {\n pub logic_fingerprint: 
Fingerprint,\n\n pub import_ops: Vec,\n pub op_scope: AnalyzedOpScope,\n pub export_ops: Vec,\n pub export_op_groups: Vec,\n}\n\npub struct TransientExecutionPlan {\n pub input_fields: Vec,\n pub op_scope: AnalyzedOpScope,\n pub output_value: AnalyzedValueMapping,\n}\n\nfn u32_is_zero(v: &u32) -> bool {\n *v == 0\n}\n"], ["/cocoindex/src/llm/mod.rs", "use crate::prelude::*;\n\nuse crate::base::json_schema::ToJsonSchemaOptions;\nuse infer::Infer;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\nstatic INFER: LazyLock = LazyLock::new(Infer::new);\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub enum LlmApiType {\n Ollama,\n OpenAi,\n Gemini,\n Anthropic,\n LiteLlm,\n OpenRouter,\n Voyage,\n Vllm,\n VertexAi,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct VertexAiConfig {\n pub project: String,\n pub region: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum LlmApiConfig {\n VertexAi(VertexAiConfig),\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LlmSpec {\n pub api_type: LlmApiType,\n pub address: Option,\n pub model: String,\n pub api_config: Option,\n}\n\n#[derive(Debug)]\npub enum OutputFormat<'a> {\n JsonSchema {\n name: Cow<'a, str>,\n schema: Cow<'a, SchemaObject>,\n },\n}\n\n#[derive(Debug)]\npub struct LlmGenerateRequest<'a> {\n pub model: &'a str,\n pub system_prompt: Option>,\n pub user_prompt: Cow<'a, str>,\n pub image: Option>,\n pub output_format: Option>,\n}\n\n#[derive(Debug)]\npub struct LlmGenerateResponse {\n pub text: String,\n}\n\n#[async_trait]\npub trait LlmGenerationClient: Send + Sync {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result;\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions;\n}\n\n#[derive(Debug)]\npub struct LlmEmbeddingRequest<'a> {\n pub model: &'a str,\n pub text: Cow<'a, str>,\n pub output_dimension: Option,\n pub task_type: Option>,\n}\n\npub struct LlmEmbeddingResponse 
{\n pub embedding: Vec,\n}\n\n#[async_trait]\npub trait LlmEmbeddingClient: Send + Sync {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result;\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option;\n}\n\nmod anthropic;\nmod gemini;\nmod litellm;\nmod ollama;\nmod openai;\nmod openrouter;\nmod vllm;\nmod voyage;\n\npub async fn new_llm_generation_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::Anthropic => {\n Box::new(anthropic::Client::new(address).await?) as Box\n }\n LlmApiType::LiteLlm => {\n Box::new(litellm::Client::new_litellm(address).await?) as Box\n }\n LlmApiType::OpenRouter => Box::new(openrouter::Client::new_openrouter(address).await?)\n as Box,\n LlmApiType::Voyage => {\n api_bail!(\"Voyage is not supported for generation\")\n }\n LlmApiType::Vllm => {\n Box::new(vllm::Client::new_vllm(address).await?) as Box\n }\n };\n Ok(client)\n}\n\npub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) 
as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}\n\npub fn detect_image_mime_type(bytes: &[u8]) -> Result<&'static str> {\n let infer = &*INFER;\n match infer.get(bytes) {\n Some(info) if info.mime_type().starts_with(\"image/\") => Ok(info.mime_type()),\n _ => bail!(\"Unknown or unsupported image format\"),\n }\n}\n"], ["/cocoindex/src/utils/immutable.rs", "#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]\npub enum RefList<'a, T> {\n #[default]\n Nil,\n\n Cons(T, &'a RefList<'a, T>),\n}\n\nimpl<'a, T> RefList<'a, T> {\n pub fn prepend(&'a self, head: T) -> Self {\n Self::Cons(head, self)\n }\n\n pub fn iter(&'a self) -> impl Iterator {\n self\n }\n\n pub fn head(&'a self) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, _) => Some(head),\n }\n }\n\n pub fn headn(&'a self, n: usize) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n if n == 0 {\n Some(head)\n } else {\n tail.headn(n - 1)\n }\n }\n }\n }\n\n pub fn tail(&'a self) -> Option<&'a RefList<'a, T>> {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => Some(tail),\n }\n }\n\n pub fn tailn(&'a self, n: usize) -> Option<&'a RefList<'a, T>> {\n if n == 0 {\n Some(self)\n } else {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => tail.tailn(n - 1),\n }\n }\n }\n}\n\nimpl<'a, T> Iterator for &'a RefList<'a, T> {\n type Item = &'a T;\n\n fn next(&mut self) -> Option {\n let current = *self;\n match current {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n *self = *tail;\n Some(head)\n }\n }\n }\n}\n"], ["/cocoindex/src/settings.rs", "use serde::Deserialize;\n\n#[derive(Deserialize, Debug)]\npub struct DatabaseConnectionSpec {\n pub url: 
String,\n pub user: Option,\n pub password: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct GlobalExecutionOptions {\n pub source_max_inflight_rows: Option,\n pub source_max_inflight_bytes: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct Settings {\n #[serde(default)]\n pub database: Option,\n #[serde(default)]\n #[allow(dead_code)] // Used via serialization/deserialization to Python\n pub app_namespace: String,\n #[serde(default)]\n pub global_execution_options: GlobalExecutionOptions,\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_settings_deserialize_with_database() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n },\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, Some(\"testuser\".to_string()));\n assert_eq!(db.password, Some(\"testpass\".to_string()));\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_without_database() {\n let json = r#\"{\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_empty_object() {\n let json = r#\"{}\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_settings_deserialize_database_without_user_password() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\"\n }\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n 
assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, None);\n assert_eq!(db.password, None);\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_database_connection_spec_deserialize() {\n let json = r#\"{\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n }\"#;\n\n let db_spec: DatabaseConnectionSpec = serde_json::from_str(json).unwrap();\n\n assert_eq!(db_spec.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db_spec.user, Some(\"testuser\".to_string()));\n assert_eq!(db_spec.password, Some(\"testpass\".to_string()));\n }\n}\n"], ["/cocoindex/src/utils/db.rs", "#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidIdentifier(pub String);\n\nimpl TryFrom for ValidIdentifier {\n type Error = anyhow::Error;\n\n fn try_from(s: String) -> Result {\n if !s.is_empty() && s.chars().all(|c| c.is_alphanumeric() || c == '_') {\n Ok(ValidIdentifier(s))\n } else {\n Err(anyhow::anyhow!(\"Invalid identifier: {s:?}\"))\n }\n }\n}\n\nimpl std::fmt::Display for ValidIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.0, f)\n }\n}\n\nimpl std::ops::Deref for ValidIdentifier {\n type Target = String;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\npub enum WriteAction {\n Insert,\n Update,\n}\n\npub fn sanitize_identifier(s: &str) -> String {\n let mut result = String::new();\n for c in s.chars() {\n if c.is_alphanumeric() || c == '_' {\n result.push(c);\n } else {\n result.push_str(\"__\");\n }\n }\n result\n}\n"], ["/cocoindex/src/ops/registry.rs", "use super::interface::ExecutorFactory;\nuse anyhow::Result;\nuse std::collections::HashMap;\n\npub struct ExecutorFactoryRegistry {\n factories: HashMap,\n}\n\nimpl Default for ExecutorFactoryRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl 
ExecutorFactoryRegistry {\n pub fn new() -> Self {\n Self {\n factories: HashMap::new(),\n }\n }\n\n pub fn register(&mut self, name: String, factory: ExecutorFactory) -> Result<()> {\n match self.factories.entry(name) {\n std::collections::hash_map::Entry::Occupied(entry) => Err(anyhow::anyhow!(\n \"Factory with name already exists: {}\",\n entry.key()\n )),\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(factory);\n Ok(())\n }\n }\n }\n\n pub fn get(&self, name: &str) -> Option<&ExecutorFactory> {\n self.factories.get(name)\n }\n}\n"], ["/cocoindex/src/ops/registration.rs", "use super::{\n factory_bases::*, functions, registry::ExecutorFactoryRegistry, sdk::ExecutorFactory, sources,\n targets,\n};\nuse anyhow::Result;\nuse std::sync::{LazyLock, RwLock};\n\nfn register_executor_factories(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n let reqwest_client = reqwest::Client::new();\n\n sources::local_file::Factory.register(registry)?;\n sources::google_drive::Factory.register(registry)?;\n sources::amazon_s3::Factory.register(registry)?;\n sources::azure_blob::Factory.register(registry)?;\n\n functions::parse_json::Factory.register(registry)?;\n functions::split_recursively::register(registry)?;\n functions::extract_by_llm::Factory.register(registry)?;\n functions::embed_text::register(registry)?;\n\n targets::postgres::Factory::default().register(registry)?;\n targets::qdrant::register(registry)?;\n targets::kuzu::register(registry, reqwest_client)?;\n\n targets::neo4j::Factory::new().register(registry)?;\n\n Ok(())\n}\n\nstatic EXECUTOR_FACTORY_REGISTRY: LazyLock> = LazyLock::new(|| {\n let mut registry = ExecutorFactoryRegistry::new();\n register_executor_factories(&mut registry).expect(\"Failed to register executor factories\");\n RwLock::new(registry)\n});\n\npub fn get_optional_executor_factory(kind: &str) -> Option {\n let registry = EXECUTOR_FACTORY_REGISTRY.read().unwrap();\n registry.get(kind).cloned()\n}\n\npub fn 
get_executor_factory(kind: &str) -> Result {\n get_optional_executor_factory(kind)\n .ok_or_else(|| anyhow::anyhow!(\"Executor factory not found for op kind: {}\", kind))\n}\n\npub fn register_factory(name: String, factory: ExecutorFactory) -> Result<()> {\n let mut registry = EXECUTOR_FACTORY_REGISTRY.write().unwrap();\n registry.register(name, factory)\n}\n"], ["/cocoindex/src/llm/vllm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_vllm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:8000/v1\".to_string());\n let api_key = std::env::var(\"VLLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/litellm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_litellm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:4000\".to_string());\n let api_key = std::env::var(\"LITELLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/openrouter.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_openrouter(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"https://openrouter.ai/api/v1\".to_string());\n let api_key = std::env::var(\"OPENROUTER_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let 
Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/prelude.rs", "#![allow(unused_imports)]\n\npub(crate) use anyhow::{Context, Result};\npub(crate) use async_trait::async_trait;\npub(crate) use chrono::{DateTime, Utc};\npub(crate) use futures::{FutureExt, StreamExt};\npub(crate) use futures::{\n future::{BoxFuture, Shared},\n prelude::*,\n stream::BoxStream,\n};\npub(crate) use indexmap::{IndexMap, IndexSet};\npub(crate) use itertools::Itertools;\npub(crate) use serde::{Deserialize, Serialize, de::DeserializeOwned};\npub(crate) use std::any::Any;\npub(crate) use std::borrow::Cow;\npub(crate) use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};\npub(crate) use std::hash::Hash;\npub(crate) use std::sync::{Arc, LazyLock, Mutex, OnceLock, RwLock, Weak};\n\npub(crate) use crate::base::{self, schema, spec, value};\npub(crate) use crate::builder::{self, exec_ctx, plan};\npub(crate) use crate::execution;\npub(crate) use crate::lib_context::{FlowContext, LibContext, get_lib_context, get_runtime};\npub(crate) use crate::ops::interface;\npub(crate) use crate::service::error::{ApiError, invariance_violation};\npub(crate) use crate::setup;\npub(crate) use crate::setup::AuthRegistry;\npub(crate) use crate::utils::{self, concur_control, retryable};\npub(crate) use crate::{api_bail, api_error};\n\npub(crate) use anyhow::{anyhow, bail};\npub(crate) use async_stream::{stream, try_stream};\npub(crate) use log::{debug, error, info, trace, warn};\n\npub(crate) use derivative::Derivative;\n"], ["/cocoindex/src/base/field_attrs.rs", "use const_format::concatcp;\n\npub static COCOINDEX_PREFIX: &str = \"cocoindex.io/\";\n\n/// Present for bytes and str. 
It points to fields that represents the original file name for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_FILENAME: &str = concatcp!(COCOINDEX_PREFIX, \"content_filename\");\n\n/// Present for bytes and str. It points to fields that represents mime types for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_MIME_TYPE: &str = concatcp!(COCOINDEX_PREFIX, \"content_mime_type\");\n\n/// Present for chunks. It points to fields that the chunks are for.\n/// Type: AnalyzedValueMapping\npub static CHUNK_BASE_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"chunk_base_text\");\n\n/// Base text for an embedding vector.\npub static _EMBEDDING_ORIGIN_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"embedding_origin_text\");\n"], ["/cocoindex/src/execution/mod.rs", "pub(crate) mod db_tracking_setup;\npub(crate) mod dumper;\npub(crate) mod evaluator;\npub(crate) mod indexing_status;\npub(crate) mod memoization;\npub(crate) mod row_indexer;\npub(crate) mod source_indexer;\npub(crate) mod stats;\n\nmod live_updater;\npub(crate) use live_updater::*;\n\nmod db_tracking;\n"], ["/cocoindex/src/lib.rs", "mod base;\nmod builder;\nmod execution;\nmod lib_context;\nmod llm;\nmod ops;\nmod prelude;\nmod py;\nmod server;\nmod service;\nmod settings;\nmod setup;\nmod utils;\n"], ["/cocoindex/src/ops/mod.rs", "pub mod interface;\npub mod registry;\n\n// All operations\nmod factory_bases;\nmod functions;\nmod sources;\nmod targets;\n\nmod registration;\npub(crate) use registration::*;\npub(crate) mod py_factory;\n\n// SDK is used for help registration for operations.\nmod sdk;\n"], ["/cocoindex/src/ops/targets/shared/mod.rs", "pub mod property_graph;\npub mod table_columns;\n"], ["/cocoindex/src/builder/mod.rs", "pub mod analyzer;\npub mod exec_ctx;\npub mod flow_builder;\npub mod plan;\n\nmod analyzed_flow;\n\npub use analyzed_flow::AnalyzedFlow;\npub use analyzed_flow::AnalyzedTransientFlow;\n"], ["/cocoindex/src/setup/mod.rs", "mod auth_registry;\nmod db_metadata;\nmod 
driver;\nmod states;\n\npub mod components;\n\npub use auth_registry::AuthRegistry;\npub use driver::*;\npub use states::*;\n"], ["/cocoindex/src/ops/sources/mod.rs", "pub mod amazon_s3;\npub mod azure_blob;\npub mod google_drive;\npub mod local_file;\n"], ["/cocoindex/src/service/mod.rs", "pub(crate) mod error;\npub(crate) mod flows;\n"], ["/cocoindex/src/ops/functions/mod.rs", "pub mod embed_text;\npub mod extract_by_llm;\npub mod parse_json;\npub mod split_recursively;\n\n#[cfg(test)]\nmod test_utils;\n"], ["/cocoindex/src/ops/targets/mod.rs", "mod shared;\n\npub mod kuzu;\npub mod neo4j;\npub mod postgres;\npub mod qdrant;\n"], ["/cocoindex/src/base/mod.rs", "pub mod duration;\npub mod field_attrs;\npub mod json_schema;\npub mod schema;\npub mod spec;\npub mod value;\n"], ["/cocoindex/src/utils/mod.rs", "pub mod concur_control;\npub mod db;\npub mod fingerprint;\npub mod immutable;\npub mod retryable;\npub mod yaml_ser;\n"]], "task_instance_info": {"created_time": "2025-08-21 01:30:28", "created_task_model": "DeepSeek-R1", "function_skeleton": "fn describe_changes (&self) -> Vec {}", "code_description": "This code segment functions to: Generate human-readable descriptions of pending database schema modifications based on version comparisons and state transitions, including table lifecycle operations and data cleanup requirements."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\n fn describe_changes(&self) -> Vec {\n let mut changes = Vec::new();\n\n if let Some(desired) = &self.desired_state {\n // Handle table rename operations\n for legacy_name in &self.legacy_table_names {\n changes.push(setup::ChangeDescription::new(format!(\n \"Rename legacy tracking table {} to {}\",\n legacy_name, desired.table_name\n )));\n }\n\n // Handle table creation/version upgrades\n match self.min_existing_version_id {\n Some(existing_version) if existing_version < desired.version_id => {\n 
changes.push(setup::ChangeDescription::new(format!(\n \"Upgrade tracking table {} from version {} to {}\",\n desired.table_name, existing_version, desired.version_id\n )));\n }\n None => {\n changes.push(setup::ChangeDescription::new(format!(\n \"Create tracking table {} (version {})\",\n desired.table_name, desired.version_id\n )));\n }\n _ => {}\n }\n } else {\n // Handle table deletions\n for legacy_name in &self.legacy_table_names {\n changes.push(setup::ChangeDescription::new(format!(\n \"Drop legacy tracking table {}\",\n legacy_name\n )));\n }\n }\n\n // Handle source ID cleanups\n if !self.source_ids_to_delete.is_empty() {\n let ids = self\n .source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \");\n changes.push(setup::ChangeDescription::new(format!(\n \"Delete tracking data for source IDs: {}\",\n ids\n )));\n }\n\n changes\n }\n[TASK_END]\n```", "inference_time": "2025-08-21 01-31-35"}, "editdistance_info": {"edit_distance": 34.3465, "calculate_time": "2025-08-21 01:31:35", "true_code_clean": "fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. 
\",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. \",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }", "predict_code_clean": "fn describe_changes(&self) -> Vec {\n let mut changes = Vec::new();\n if let Some(desired) = &self.desired_state {\n for legacy_name in &self.legacy_table_names {\n changes.push(setup::ChangeDescription::new(format!(\n \"Rename legacy tracking table {} to {}\",\n legacy_name, desired.table_name\n )));\n }\n match self.min_existing_version_id {\n Some(existing_version) if existing_version < desired.version_id => {\n changes.push(setup::ChangeDescription::new(format!(\n \"Upgrade tracking table {} from version {} to {}\",\n desired.table_name, existing_version, desired.version_id\n )));\n }\n None => {\n changes.push(setup::ChangeDescription::new(format!(\n \"Create tracking table {} (version {})\",\n desired.table_name, desired.version_id\n )));\n }\n _ => {}\n }\n } else {\n for legacy_name in &self.legacy_table_names {\n changes.push(setup::ChangeDescription::new(format!(\n \"Drop legacy tracking table {}\",\n legacy_name\n )));\n }\n }\n if !self.source_ids_to_delete.is_empty() {\n let ids = self\n .source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \");\n changes.push(setup::ChangeDescription::new(format!(\n \"Delete tracking data for source IDs: {}\",\n ids\n )));\n }\n changes\n }"}} {"repo_name": "cocoindex", "file_name": "/cocoindex/src/ops/targets/shared/table_columns.rs", "inference_info": {"prefix_code": "use crate::{\n ops::sdk::SetupStateCompatibility,\n prelude::*,\n setup::{CombinedState, SetupChangeType},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TableColumnsSchema {\n #[serde(with = \"indexmap::map::serde_seq\", alias = 
\"key_fields_schema\")]\n pub key_columns: IndexMap,\n\n #[serde(with = \"indexmap::map::serde_seq\", alias = \"value_fields_schema\")]\n pub value_columns: IndexMap,\n}\n\n#[derive(Debug)]\npub enum TableUpsertionAction {\n Create {\n keys: IndexMap,\n values: IndexMap,\n },\n Update {\n columns_to_delete: IndexSet,\n columns_to_upsert: IndexMap,\n },\n}\n\nimpl TableUpsertionAction {\n pub fn is_empty(&self) -> bool {\n match self {\n Self::Create { .. } => false,\n Self::Update {\n columns_to_delete,\n columns_to_upsert,\n } => columns_to_delete.is_empty() && columns_to_upsert.is_empty(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct TableMainSetupAction {\n pub drop_existing: bool,\n pub table_upsertion: Option>,\n}\n\nimpl TableMainSetupAction {\n ", "suffix_code": "\n\n pub fn describe_changes(&self) -> Vec\n where\n T: std::fmt::Display,\n {\n let mut descriptions = vec![];\n if self.drop_existing {\n descriptions.push(setup::ChangeDescription::Action(\"Drop table\".to_string()));\n }\n if let Some(table_upsertion) = &self.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create table:\\n key columns: {}\\n value columns: {}\\n\",\n keys.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n values.iter().map(|(k, v)| format!(\"{k} {v}\")).join(\", \"),\n )));\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n if !columns_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete column from table: {}\",\n columns_to_delete.iter().join(\", \"),\n )));\n }\n if !columns_to_upsert.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Add / update columns in table: {}\",\n columns_to_upsert\n .iter()\n .map(|(k, v)| format!(\"{k} {v}\"))\n .join(\", \"),\n )));\n }\n }\n }\n }\n descriptions\n }\n\n pub fn change_type(&self, has_other_update: bool) -> 
SetupChangeType {\n match (self.drop_existing, &self.table_upsertion) {\n (_, Some(TableUpsertionAction::Create { .. })) => SetupChangeType::Create,\n (_, Some(TableUpsertionAction::Update { .. })) => SetupChangeType::Update,\n (true, None) => SetupChangeType::Delete,\n (false, None) => {\n if has_other_update {\n SetupChangeType::Update\n } else {\n SetupChangeType::NoChange\n }\n }\n }\n }\n}\n\npub fn check_table_compatibility(\n desired: &TableColumnsSchema,\n existing: &TableColumnsSchema,\n) -> SetupStateCompatibility {\n let is_key_identical = existing.key_columns == desired.key_columns;\n if is_key_identical {\n let is_value_lossy = existing\n .value_columns\n .iter()\n .any(|(k, v)| desired.value_columns.get(k) != Some(v));\n if is_value_lossy {\n SetupStateCompatibility::PartialCompatible\n } else {\n SetupStateCompatibility::Compatible\n }\n } else {\n SetupStateCompatibility::NotCompatible\n }\n}\n", "middle_code": "pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n let table_upsertion = if existing.always_exists() && !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n .flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n 
.collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n Self {\n drop_existing,\n table_upsertion: Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "rust", "sub_task_type": null}, "context_code": [["/cocoindex/src/ops/targets/postgres.rs", "use crate::prelude::*;\n\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::base::spec::{self, *};\nuse crate::ops::sdk::*;\nuse crate::settings::DatabaseConnectionSpec;\nuse async_trait::async_trait;\nuse indexmap::{IndexMap, IndexSet};\nuse itertools::Itertools;\nuse serde::Serialize;\nuse sqlx::PgPool;\nuse sqlx::postgres::types::PgRange;\nuse std::ops::Bound;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n database: Option>,\n table_name: Option,\n}\nconst BIND_LIMIT: usize = 65535;\n\nfn key_value_fields_iter<'a>(\n key_fields_schema: &[FieldSchema],\n key_value: &'a KeyValue,\n) -> Result<&'a [KeyValue]> {\n let slice = if key_fields_schema.len() == 1 {\n std::slice::from_ref(key_value)\n } else {\n match key_value {\n KeyValue::Struct(fields) => fields,\n _ => bail!(\"expect struct key value\"),\n }\n };\n Ok(slice)\n}\n\nfn convertible_to_pgvector(vec_schema: &VectorTypeSchema) -> bool {\n if vec_schema.dimension.is_some() {\n matches!(\n *vec_schema.element_type,\n BasicValueType::Float32 | BasicValueType::Float64 | BasicValueType::Int64\n )\n } else {\n false\n }\n}\n\nfn bind_key_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n key_value: 
&'arg KeyValue,\n) -> Result<()> {\n match key_value {\n KeyValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Str(v) => {\n builder.push_bind(&**v);\n }\n KeyValue::Bool(v) => {\n builder.push_bind(v);\n }\n KeyValue::Int64(v) => {\n builder.push_bind(v);\n }\n KeyValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n KeyValue::Uuid(v) => {\n builder.push_bind(v);\n }\n KeyValue::Date(v) => {\n builder.push_bind(v);\n }\n KeyValue::Struct(fields) => {\n builder.push_bind(sqlx::types::Json(fields));\n }\n }\n Ok(())\n}\n\nfn bind_value_field<'arg>(\n builder: &mut sqlx::QueryBuilder<'arg, sqlx::Postgres>,\n field_schema: &'arg FieldSchema,\n value: &'arg Value,\n) -> Result<()> {\n match &value {\n Value::Basic(v) => match v {\n BasicValue::Bytes(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Str(v) => {\n builder.push_bind(&**v);\n }\n BasicValue::Bool(v) => {\n builder.push_bind(v);\n }\n BasicValue::Int64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float32(v) => {\n builder.push_bind(v);\n }\n BasicValue::Float64(v) => {\n builder.push_bind(v);\n }\n BasicValue::Range(v) => {\n builder.push_bind(PgRange {\n start: Bound::Included(v.start as i64),\n end: Bound::Excluded(v.end as i64),\n });\n }\n BasicValue::Uuid(v) => {\n builder.push_bind(v);\n }\n BasicValue::Date(v) => {\n builder.push_bind(v);\n }\n BasicValue::Time(v) => {\n builder.push_bind(v);\n }\n BasicValue::LocalDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::OffsetDateTime(v) => {\n builder.push_bind(v);\n }\n BasicValue::TimeDelta(v) => {\n builder.push_bind(v);\n }\n BasicValue::Json(v) => {\n builder.push_bind(sqlx::types::Json(&**v));\n }\n BasicValue::Vector(v) => match &field_schema.value_type.typ {\n ValueType::Basic(BasicValueType::Vector(vs)) if convertible_to_pgvector(vs) => {\n let vec = v\n .iter()\n .map(|v| {\n Ok(match v {\n BasicValue::Float32(v) => *v,\n 
BasicValue::Float64(v) => *v as f32,\n BasicValue::Int64(v) => *v as f32,\n v => bail!(\"unexpected vector element type: {}\", v.kind()),\n })\n })\n .collect::>>()?;\n builder.push_bind(pgvector::Vector::from(vec));\n }\n _ => {\n builder.push_bind(sqlx::types::Json(v));\n }\n },\n BasicValue::UnionVariant { .. } => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v: value,\n }));\n }\n },\n Value::Null => {\n builder.push(\"NULL\");\n }\n v => {\n builder.push_bind(sqlx::types::Json(TypedValue {\n t: &field_schema.value_type.typ,\n v,\n }));\n }\n };\n Ok(())\n}\n\npub struct ExportContext {\n db_ref: Option>,\n db_pool: PgPool,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n upsert_sql_prefix: String,\n upsert_sql_suffix: String,\n delete_sql_prefix: String,\n}\n\nimpl ExportContext {\n fn new(\n db_ref: Option>,\n db_pool: PgPool,\n table_name: String,\n key_fields_schema: Vec,\n value_fields_schema: Vec,\n ) -> Result {\n let key_fields = key_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let all_fields = (key_fields_schema.iter().chain(value_fields_schema.iter()))\n .map(|f| format!(\"\\\"{}\\\"\", f.name))\n .collect::>()\n .join(\", \");\n let set_value_fields = value_fields_schema\n .iter()\n .map(|f| format!(\"\\\"{}\\\" = EXCLUDED.\\\"{}\\\"\", f.name, f.name))\n .collect::>()\n .join(\", \");\n\n Ok(Self {\n db_ref,\n db_pool,\n upsert_sql_prefix: format!(\"INSERT INTO {table_name} ({all_fields}) VALUES \"),\n upsert_sql_suffix: if value_fields_schema.is_empty() {\n format!(\" ON CONFLICT ({key_fields}) DO NOTHING;\")\n } else {\n format!(\" ON CONFLICT ({key_fields}) DO UPDATE SET {set_value_fields};\")\n },\n delete_sql_prefix: format!(\"DELETE FROM {table_name} WHERE \"),\n key_fields_schema,\n value_fields_schema,\n })\n }\n}\n\nimpl ExportContext {\n async fn upsert(\n &self,\n upserts: &[interface::ExportTargetUpsertEntry],\n txn: &mut 
sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n let num_parameters = self.key_fields_schema.len() + self.value_fields_schema.len();\n for upsert_chunk in upserts.chunks(BIND_LIMIT / num_parameters) {\n let mut query_builder = sqlx::QueryBuilder::new(&self.upsert_sql_prefix);\n for (i, upsert) in upsert_chunk.iter().enumerate() {\n if i > 0 {\n query_builder.push(\",\");\n }\n query_builder.push(\" (\");\n for (j, key_value) in key_value_fields_iter(&self.key_fields_schema, &upsert.key)?\n .iter()\n .enumerate()\n {\n if j > 0 {\n query_builder.push(\", \");\n }\n bind_key_field(&mut query_builder, key_value)?;\n }\n if self.value_fields_schema.len() != upsert.value.fields.len() {\n bail!(\n \"unmatched value length: {} vs {}\",\n self.value_fields_schema.len(),\n upsert.value.fields.len()\n );\n }\n for (schema, value) in self\n .value_fields_schema\n .iter()\n .zip(upsert.value.fields.iter())\n {\n query_builder.push(\", \");\n bind_value_field(&mut query_builder, schema, value)?;\n }\n query_builder.push(\")\");\n }\n query_builder.push(&self.upsert_sql_suffix);\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n\n async fn delete(\n &self,\n deletions: &[interface::ExportTargetDeleteEntry],\n txn: &mut sqlx::PgTransaction<'_>,\n ) -> Result<()> {\n // TODO: Find a way to batch delete.\n for deletion in deletions.iter() {\n let mut query_builder = sqlx::QueryBuilder::new(\"\");\n query_builder.push(&self.delete_sql_prefix);\n for (i, (schema, value)) in self\n .key_fields_schema\n .iter()\n .zip(key_value_fields_iter(&self.key_fields_schema, &deletion.key)?.iter())\n .enumerate()\n {\n if i > 0 {\n query_builder.push(\" AND \");\n }\n query_builder.push(\"\\\"\");\n query_builder.push(schema.name.as_str());\n query_builder.push(\"\\\"\");\n query_builder.push(\"=\");\n bind_key_field(&mut query_builder, value)?;\n }\n query_builder.build().execute(&mut **txn).await?;\n }\n Ok(())\n }\n}\n\n#[derive(Default)]\npub struct Factory 
{}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct TableId {\n #[serde(skip_serializing_if = \"Option::is_none\")]\n database: Option>,\n table_name: String,\n}\n\nimpl std::fmt::Display for TableId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.table_name)?;\n if let Some(database) = &self.database {\n write!(f, \" (database: {database})\")?;\n }\n Ok(())\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SetupState {\n #[serde(flatten)]\n columns: TableColumnsSchema,\n\n vector_indexes: BTreeMap,\n}\n\nimpl SetupState {\n fn new(\n table_id: &TableId,\n key_fields_schema: &[FieldSchema],\n value_fields_schema: &[FieldSchema],\n index_options: &IndexOptions,\n ) -> Self {\n Self {\n columns: TableColumnsSchema {\n key_columns: key_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n value_columns: value_fields_schema\n .iter()\n .map(|f| (f.name.clone(), f.value_type.typ.without_attrs()))\n .collect(),\n },\n vector_indexes: index_options\n .vector_indexes\n .iter()\n .map(|v| (to_vector_index_name(&table_id.table_name, v), v.clone()))\n .collect(),\n }\n }\n\n fn uses_pgvector(&self) -> bool {\n self.columns\n .value_columns\n .iter()\n .any(|(_, value)| match &value {\n ValueType::Basic(BasicValueType::Vector(vec_schema)) => {\n convertible_to_pgvector(vec_schema)\n }\n _ => false,\n })\n }\n}\n\nfn to_column_type_sql(column_type: &ValueType) -> String {\n match column_type {\n ValueType::Basic(basic_type) => match basic_type {\n BasicValueType::Bytes => \"bytea\".into(),\n BasicValueType::Str => \"text\".into(),\n BasicValueType::Bool => \"boolean\".into(),\n BasicValueType::Int64 => \"bigint\".into(),\n BasicValueType::Float32 => \"real\".into(),\n BasicValueType::Float64 => \"double precision\".into(),\n BasicValueType::Range => \"int8range\".into(),\n BasicValueType::Uuid => \"uuid\".into(),\n 
BasicValueType::Date => \"date\".into(),\n BasicValueType::Time => \"time\".into(),\n BasicValueType::LocalDateTime => \"timestamp\".into(),\n BasicValueType::OffsetDateTime => \"timestamp with time zone\".into(),\n BasicValueType::TimeDelta => \"interval\".into(),\n BasicValueType::Json => \"jsonb\".into(),\n BasicValueType::Vector(vec_schema) => {\n if convertible_to_pgvector(vec_schema) {\n format!(\"vector({})\", vec_schema.dimension.unwrap_or(0))\n } else {\n \"jsonb\".into()\n }\n }\n BasicValueType::Union(_) => \"jsonb\".into(),\n },\n _ => \"jsonb\".into(),\n }\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Owned(TableColumnsSchema {\n key_columns: val\n .columns\n .key_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n value_columns: val\n .columns\n .value_columns\n .iter()\n .map(|(k, v)| (k.clone(), to_column_type_sql(v)))\n .collect(),\n })\n }\n}\n\n#[derive(Debug)]\npub struct TableSetupAction {\n table_action: TableMainSetupAction,\n indexes_to_delete: IndexSet,\n indexes_to_create: IndexMap,\n}\n\n#[derive(Debug)]\npub struct SetupStatus {\n create_pgvector_extension: bool,\n actions: TableSetupAction,\n vector_as_jsonb_columns: Vec<(String, ValueType)>,\n}\n\nimpl SetupStatus {\n fn new(desired_state: Option, existing: setup::CombinedState) -> Self {\n let table_action =\n TableMainSetupAction::from_states(desired_state.as_ref(), &existing, false);\n let vector_as_jsonb_columns = desired_state\n .as_ref()\n .iter()\n .flat_map(|s| {\n s.columns.value_columns.iter().filter_map(|(name, schema)| {\n if let ValueType::Basic(BasicValueType::Vector(vec_schema)) = schema\n && !convertible_to_pgvector(vec_schema)\n {\n let is_touched = match &table_action.table_upsertion {\n Some(TableUpsertionAction::Create { values, .. 
}) => {\n values.contains_key(name)\n }\n Some(TableUpsertionAction::Update {\n columns_to_upsert, ..\n }) => columns_to_upsert.contains_key(name),\n None => false,\n };\n if is_touched {\n Some((name.clone(), schema.clone()))\n } else {\n None\n }\n } else {\n None\n }\n })\n })\n .collect::>();\n let (indexes_to_delete, indexes_to_create) = desired_state\n .as_ref()\n .map(|desired| {\n (\n existing\n .possible_versions()\n .flat_map(|v| v.vector_indexes.keys())\n .filter(|index_name| !desired.vector_indexes.contains_key(*index_name))\n .cloned()\n .collect::>(),\n desired\n .vector_indexes\n .iter()\n .filter(|(name, def)| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vector_indexes.get(*name) != Some(def))\n })\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect::>(),\n )\n })\n .unwrap_or_default();\n let create_pgvector_extension = desired_state\n .as_ref()\n .map(|s| s.uses_pgvector())\n .unwrap_or(false)\n && !existing.current.map(|s| s.uses_pgvector()).unwrap_or(false);\n\n Self {\n create_pgvector_extension,\n actions: TableSetupAction {\n table_action,\n indexes_to_delete,\n indexes_to_create,\n },\n vector_as_jsonb_columns,\n }\n }\n}\n\nfn to_vector_similarity_metric_sql(metric: VectorSimilarityMetric) -> &'static str {\n match metric {\n VectorSimilarityMetric::CosineSimilarity => \"vector_cosine_ops\",\n VectorSimilarityMetric::L2Distance => \"vector_l2_ops\",\n VectorSimilarityMetric::InnerProduct => \"vector_ip_ops\",\n }\n}\n\nfn to_index_spec_sql(index_spec: &VectorIndexDef) -> Cow<'static, str> {\n format!(\n \"USING hnsw ({} {})\",\n index_spec.field_name,\n to_vector_similarity_metric_sql(index_spec.metric)\n )\n .into()\n}\n\nfn to_vector_index_name(table_name: &str, vector_index_def: &spec::VectorIndexDef) -> String {\n format!(\n \"{}__{}__{}\",\n table_name,\n vector_index_def.field_name,\n to_vector_similarity_metric_sql(vector_index_def.metric)\n )\n}\n\nfn describe_index_spec(index_name: &str, 
index_spec: &VectorIndexDef) -> String {\n format!(\"{} {}\", index_name, to_index_spec_sql(index_spec))\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut descriptions = self.actions.table_action.describe_changes();\n for (column_name, schema) in self.vector_as_jsonb_columns.iter() {\n descriptions.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by pgvector. It will be stored as `jsonb`.\",\n column_name,\n schema\n )));\n }\n if self.create_pgvector_extension {\n descriptions.push(setup::ChangeDescription::Action(\n \"Create pg_vector extension (if not exists)\".to_string(),\n ));\n }\n if !self.actions.indexes_to_delete.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Delete indexes from table: {}\",\n self.actions.indexes_to_delete.iter().join(\", \"),\n )));\n }\n if !self.actions.indexes_to_create.is_empty() {\n descriptions.push(setup::ChangeDescription::Action(format!(\n \"Create indexes in table: {}\",\n self.actions\n .indexes_to_create\n .iter()\n .map(|(index_name, index_spec)| describe_index_spec(index_name, index_spec))\n .join(\", \"),\n )));\n }\n descriptions\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n let has_other_update = !self.actions.indexes_to_create.is_empty()\n || !self.actions.indexes_to_delete.is_empty();\n self.actions.table_action.change_type(has_other_update)\n }\n}\n\nimpl SetupStatus {\n async fn apply_change(&self, db_pool: &PgPool, table_name: &str) -> Result<()> {\n if self.actions.table_action.drop_existing {\n sqlx::query(&format!(\"DROP TABLE IF EXISTS {table_name}\"))\n .execute(db_pool)\n .await?;\n }\n if self.create_pgvector_extension {\n sqlx::query(\"CREATE EXTENSION IF NOT EXISTS vector;\")\n .execute(db_pool)\n .await?;\n }\n for index_name in self.actions.indexes_to_delete.iter() {\n let sql = format!(\"DROP INDEX IF EXISTS {index_name}\");\n 
sqlx::query(&sql).execute(db_pool).await?;\n }\n if let Some(table_upsertion) = &self.actions.table_action.table_upsertion {\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n let mut fields = (keys\n .iter()\n .map(|(name, typ)| format!(\"\\\"{name}\\\" {typ} NOT NULL\")))\n .chain(values.iter().map(|(name, typ)| format!(\"\\\"{name}\\\" {typ}\")));\n let sql = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} ({}, PRIMARY KEY ({}))\",\n fields.join(\", \"),\n keys.keys().join(\", \")\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n for column_name in columns_to_delete.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\"\",\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n for (column_name, column_type) in columns_to_upsert.iter() {\n let sql = format!(\n \"ALTER TABLE {table_name} DROP COLUMN IF EXISTS \\\"{column_name}\\\", ADD COLUMN \\\"{column_name}\\\" {column_type}\"\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n }\n }\n }\n for (index_name, index_spec) in self.actions.indexes_to_create.iter() {\n let sql = format!(\n \"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} {}\",\n to_index_spec_sql(index_spec)\n );\n sqlx::query(&sql).execute(db_pool).await?;\n }\n Ok(())\n }\n}\n\nasync fn get_db_pool(\n db_ref: Option<&spec::AuthEntryReference>,\n auth_registry: &AuthRegistry,\n) -> Result {\n let lib_context = get_lib_context()?;\n let db_conn_spec = db_ref\n .as_ref()\n .map(|db_ref| auth_registry.get(db_ref))\n .transpose()?;\n let db_pool = match db_conn_spec {\n Some(db_conn_spec) => lib_context.db_pools.get_pool(&db_conn_spec).await?,\n None => lib_context.require_builtin_db_pool()?.clone(),\n };\n Ok(db_pool)\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type 
SetupStatus = SetupStatus;\n type Key = TableId;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Postgres\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(TableId, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n let table_id = TableId {\n database: d.spec.database.clone(),\n table_name: d.spec.table_name.unwrap_or_else(|| {\n utils::db::sanitize_identifier(&format!(\n \"{}__{}\",\n context.flow_instance_name, d.name\n ))\n }),\n };\n let setup_state = SetupState::new(\n &table_id,\n &d.key_fields_schema,\n &d.value_fields_schema,\n &d.index_options,\n );\n let table_name = table_id.table_name.clone();\n let db_ref = d.spec.database;\n let auth_registry = context.auth_registry.clone();\n let export_context = Box::pin(async move {\n let db_pool = get_db_pool(db_ref.as_ref(), &auth_registry).await?;\n let export_context = Arc::new(ExportContext::new(\n db_ref,\n db_pool.clone(),\n table_name,\n d.key_fields_schema,\n d.value_fields_schema,\n )?);\n Ok(export_context)\n });\n Ok(TypedExportDataCollectionBuildOutput {\n setup_key: table_id,\n desired_setup_state: setup_state,\n export_context,\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: TableId,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n Ok(SetupStatus::new(desired, existing))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(check_table_compatibility(\n &desired.columns,\n &existing.columns,\n ))\n }\n\n fn describe_resource(&self, key: &TableId) -> Result {\n Ok(format!(\"Postgres table {}\", key.table_name))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mut_groups_by_db_ref = HashMap::new();\n for mutation in mutations.iter() 
{\n mut_groups_by_db_ref\n .entry(mutation.export_context.db_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mut_groups in mut_groups_by_db_ref.values() {\n let db_pool = &mut_groups\n .first()\n .ok_or_else(|| anyhow!(\"empty group\"))?\n .export_context\n .db_pool;\n let mut txn = db_pool.begin().await?;\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .upsert(&mut_group.mutation.upserts, &mut txn)\n .await?;\n }\n for mut_group in mut_groups.iter() {\n mut_group\n .export_context\n .delete(&mut_group.mutation.deletes, &mut txn)\n .await?;\n }\n txn.commit().await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n for change in changes.iter() {\n let db_pool = get_db_pool(change.key.database.as_ref(), &context.auth_registry).await?;\n change\n .setup_status\n .apply_change(&db_pool, &change.key.table_name)\n .await?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/kuzu.rs", "use chrono::TimeDelta;\nuse serde_json::json;\n\nuse std::fmt::Write;\n\nuse super::shared::property_graph::GraphElementMapping;\nuse super::shared::property_graph::*;\nuse super::shared::table_columns::{\n TableColumnsSchema, TableMainSetupAction, TableUpsertionAction, check_table_compatibility,\n};\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::prelude::*;\n\nuse crate::setup::SetupChangeType;\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n /// The URL of the [Kuzu API server](https://kuzu.com/docs/api/server/overview),\n /// e.g. 
`http://localhost:8000`.\n api_server_url: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\n////////////////////////////////////////////////////////////\n// Utils to deal with Kuzu\n////////////////////////////////////////////////////////////\n\nstruct CypherBuilder {\n query: String,\n}\n\nimpl CypherBuilder {\n fn new() -> Self {\n Self {\n query: String::new(),\n }\n }\n\n fn query_mut(&mut self) -> &mut String {\n &mut self.query\n }\n}\n\nstruct KuzuThinClient {\n reqwest_client: reqwest::Client,\n query_url: String,\n}\n\nimpl KuzuThinClient {\n fn new(conn_spec: &ConnectionSpec, reqwest_client: reqwest::Client) -> Self {\n Self {\n reqwest_client,\n query_url: format!(\"{}/cypher\", conn_spec.api_server_url.trim_end_matches('/')),\n }\n }\n\n async fn run_cypher(&self, cyper_builder: CypherBuilder) -> Result<()> {\n if cyper_builder.query.is_empty() {\n return Ok(());\n }\n let query = json!({\n \"query\": cyper_builder.query\n });\n let response = self\n .reqwest_client\n .post(&self.query_url)\n .json(&query)\n .send()\n .await?;\n if !response.status().is_success() {\n return Err(anyhow::anyhow!(\n \"Failed to run cypher: {}\",\n response.text().await?\n ));\n }\n Ok(())\n }\n}\n\nfn kuzu_table_type(elem_type: &ElementType) -> &'static str {\n match elem_type {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"REL\",\n }\n}\n\nfn basic_type_to_kuzu(basic_type: &BasicValueType) -> Result {\n Ok(match basic_type {\n BasicValueType::Bytes => \"BLOB\".to_string(),\n BasicValueType::Str => \"STRING\".to_string(),\n BasicValueType::Bool => \"BOOL\".to_string(),\n BasicValueType::Int64 => \"INT64\".to_string(),\n BasicValueType::Float32 => \"FLOAT\".to_string(),\n BasicValueType::Float64 => 
\"DOUBLE\".to_string(),\n BasicValueType::Range => \"UINT64[2]\".to_string(),\n BasicValueType::Uuid => \"UUID\".to_string(),\n BasicValueType::Date => \"DATE\".to_string(),\n BasicValueType::LocalDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::OffsetDateTime => \"TIMESTAMP\".to_string(),\n BasicValueType::TimeDelta => \"INTERVAL\".to_string(),\n BasicValueType::Vector(t) => format!(\n \"{}[{}]\",\n basic_type_to_kuzu(&t.element_type)?,\n t.dimension\n .map_or_else(|| \"\".to_string(), |d| d.to_string())\n ),\n t @ (BasicValueType::Union(_) | BasicValueType::Time | BasicValueType::Json) => {\n api_bail!(\"{t} is not supported in Kuzu\")\n }\n })\n}\n\nfn struct_schema_to_kuzu(struct_schema: &StructSchema) -> Result {\n Ok(format!(\n \"STRUCT({})\",\n struct_schema\n .fields\n .iter()\n .map(|f| Ok(format!(\n \"{} {}\",\n f.name,\n value_type_to_kuzu(&f.value_type.typ)?\n )))\n .collect::>>()?\n .join(\", \")\n ))\n}\n\nfn value_type_to_kuzu(value_type: &ValueType) -> Result {\n Ok(match value_type {\n ValueType::Basic(basic_type) => basic_type_to_kuzu(basic_type)?,\n ValueType::Struct(struct_type) => struct_schema_to_kuzu(struct_type)?,\n ValueType::Table(table_type) => format!(\"{}[]\", struct_schema_to_kuzu(&table_type.row)?),\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]\nstruct ReferencedNodeTable {\n table_name: String,\n\n #[serde(with = \"indexmap::map::serde_seq\")]\n key_columns: IndexMap,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct SetupState {\n schema: TableColumnsSchema,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n referenced_node_tables: Option<(ReferencedNodeTable, ReferencedNodeTable)>,\n}\n\nimpl<'a> From<&'a SetupState> for Cow<'a, TableColumnsSchema> {\n fn from(val: &'a SetupState) -> Self {\n Cow::Borrowed(&val.schema)\n 
}\n}\n\n#[derive(Debug)]\nstruct GraphElementDataSetupStatus {\n actions: TableMainSetupAction,\n referenced_node_tables: Option<(String, String)>,\n drop_affected_referenced_node_tables: IndexSet,\n}\n\nimpl setup::ResourceSetupStatus for GraphElementDataSetupStatus {\n fn describe_changes(&self) -> Vec {\n self.actions.describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.actions.change_type(false)\n }\n}\n\nfn append_drop_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n if !setup_status.actions.drop_existing {\n return Ok(());\n }\n writeln!(\n cypher.query_mut(),\n \"DROP TABLE IF EXISTS {};\",\n elem_type.label()\n )?;\n Ok(())\n}\n\nfn append_delete_orphaned_nodes(cypher: &mut CypherBuilder, node_table: &str) -> Result<()> {\n writeln!(\n cypher.query_mut(),\n \"MATCH (n:{node_table}) WITH n WHERE NOT (n)--() DELETE n;\"\n )?;\n Ok(())\n}\n\nfn append_upsert_table(\n cypher: &mut CypherBuilder,\n setup_status: &GraphElementDataSetupStatus,\n elem_type: &ElementType,\n) -> Result<()> {\n let table_upsertion = if let Some(table_upsertion) = &setup_status.actions.table_upsertion {\n table_upsertion\n } else {\n return Ok(());\n };\n match table_upsertion {\n TableUpsertionAction::Create { keys, values } => {\n write!(\n cypher.query_mut(),\n \"CREATE {kuzu_table_type} TABLE IF NOT EXISTS {table_name} (\",\n kuzu_table_type = kuzu_table_type(elem_type),\n table_name = elem_type.label(),\n )?;\n if let Some((src, tgt)) = &setup_status.referenced_node_tables {\n write!(cypher.query_mut(), \"FROM {src} TO {tgt}, \")?;\n }\n cypher.query_mut().push_str(\n keys.iter()\n .chain(values.iter())\n .map(|(name, kuzu_type)| format!(\"{name} {kuzu_type}\"))\n .join(\", \")\n .as_str(),\n );\n match elem_type {\n ElementType::Node(_) => {\n write!(\n cypher.query_mut(),\n \", {SELF_CONTAINED_TAG_FIELD_NAME} BOOL, PRIMARY KEY ({})\",\n keys.iter().map(|(name, _)| 
name).join(\", \")\n )?;\n }\n ElementType::Relationship(_) => {}\n }\n write!(cypher.query_mut(), \");\\n\\n\")?;\n }\n TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n } => {\n let table_name = elem_type.label();\n for name in columns_to_delete\n .iter()\n .chain(columns_to_upsert.iter().map(|(name, _)| name))\n {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} DROP IF EXISTS {name};\"\n )?;\n }\n for (name, kuzu_type) in columns_to_upsert.iter() {\n writeln!(\n cypher.query_mut(),\n \"ALTER TABLE {table_name} ADD {name} {kuzu_type};\",\n )?;\n }\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Utils to convert value to Kuzu literals\n////////////////////////////////////////////////////////////\n\nfn append_string_literal(cypher: &mut CypherBuilder, s: &str) -> Result<()> {\n let out = cypher.query_mut();\n out.push('\"');\n for c in s.chars() {\n match c {\n '\\\\' => out.push_str(\"\\\\\\\\\"),\n '\"' => out.push_str(\"\\\\\\\"\"),\n // Control characters (0x00..=0x1F)\n c if (c as u32) < 0x20 => write!(out, \"\\\\u{:04X}\", c as u32)?,\n // BMP Unicode\n c if (c as u32) <= 0xFFFF => out.push(c),\n // Non-BMP Unicode: Encode as surrogate pairs for Cypher \\uXXXX\\uXXXX\n c => {\n let code = c as u32;\n let high = 0xD800 + ((code - 0x10000) >> 10);\n let low = 0xDC00 + ((code - 0x10000) & 0x3FF);\n write!(out, \"\\\\u{high:04X}\\\\u{low:04X}\")?;\n }\n }\n }\n out.push('\"');\n Ok(())\n}\n\nfn append_basic_value(cypher: &mut CypherBuilder, basic_value: &BasicValue) -> Result<()> {\n match basic_value {\n BasicValue::Bytes(bytes) => {\n write!(cypher.query_mut(), \"BLOB(\")?;\n for byte in bytes {\n write!(cypher.query_mut(), \"\\\\\\\\x{byte:02X}\")?;\n }\n write!(cypher.query_mut(), \")\")?;\n }\n BasicValue::Str(s) => {\n append_string_literal(cypher, s)?;\n }\n BasicValue::Bool(b) => {\n write!(cypher.query_mut(), \"{b}\")?;\n }\n BasicValue::Int64(i) => {\n 
write!(cypher.query_mut(), \"{i}\")?;\n }\n BasicValue::Float32(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Float64(f) => {\n write!(cypher.query_mut(), \"{f}\")?;\n }\n BasicValue::Range(r) => {\n write!(cypher.query_mut(), \"[{}, {}]\", r.start, r.end)?;\n }\n BasicValue::Uuid(u) => {\n write!(cypher.query_mut(), \"UUID(\\\"{u}\\\")\")?;\n }\n BasicValue::Date(d) => {\n write!(cypher.query_mut(), \"DATE(\\\"{d}\\\")\")?;\n }\n BasicValue::LocalDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::OffsetDateTime(dt) => write!(cypher.query_mut(), \"TIMESTAMP(\\\"{dt}\\\")\")?,\n BasicValue::TimeDelta(td) => {\n let num_days = td.num_days();\n let sub_day_duration = *td - TimeDelta::days(num_days);\n write!(cypher.query_mut(), \"INTERVAL(\\\"\")?;\n if num_days != 0 {\n write!(cypher.query_mut(), \"{num_days} days \")?;\n }\n let microseconds = sub_day_duration\n .num_microseconds()\n .ok_or_else(invariance_violation)?;\n write!(cypher.query_mut(), \"{microseconds} microseconds\\\")\")?;\n }\n BasicValue::Vector(v) => {\n write!(cypher.query_mut(), \"[\")?;\n let mut prefix = \"\";\n for elem in v.iter() {\n cypher.query_mut().push_str(prefix);\n append_basic_value(cypher, elem)?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"]\")?;\n }\n v @ (BasicValue::UnionVariant { .. 
} | BasicValue::Time(_) | BasicValue::Json(_)) => {\n bail!(\"value types are not supported in Kuzu: {}\", v.kind());\n }\n }\n Ok(())\n}\n\nfn append_struct_fields<'a>(\n cypher: &'a mut CypherBuilder,\n field_schema: &[schema::FieldSchema],\n field_values: impl Iterator,\n) -> Result<()> {\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(field_schema.iter(), field_values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v)?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_value(\n cypher: &mut CypherBuilder,\n typ: &schema::ValueType,\n value: &value::Value,\n) -> Result<()> {\n match value {\n value::Value::Null => {\n write!(cypher.query_mut(), \"NULL\")?;\n }\n value::Value::Basic(basic_value) => append_basic_value(cypher, basic_value)?,\n value::Value::Struct(struct_value) => {\n let struct_schema = match typ {\n schema::ValueType::Struct(struct_schema) => struct_schema,\n _ => {\n api_bail!(\"Expected struct type, got {}\", typ);\n }\n };\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &struct_schema.fields, struct_value.fields.iter())?;\n cypher.query_mut().push('}');\n }\n value::Value::KTable(map) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n cypher.query_mut().push('[');\n let mut prefix = \"\";\n for (k, v) in map.iter() {\n let key_value = value::Value::from(k);\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(\n cypher,\n &row_schema.fields,\n std::iter::once(&key_value).chain(v.fields.iter()),\n )?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n value::Value::LTable(rows) | value::Value::UTable(rows) => {\n let row_schema = match typ {\n schema::ValueType::Table(table_schema) => &table_schema.row,\n _ => {\n api_bail!(\"Expected table type, got {}\", typ);\n }\n };\n 
cypher.query_mut().push('[');\n let mut prefix = \"\";\n for v in rows.iter() {\n cypher.query_mut().push_str(prefix);\n cypher.query_mut().push('{');\n append_struct_fields(cypher, &row_schema.fields, v.fields.iter())?;\n cypher.query_mut().push('}');\n prefix = \", \";\n }\n cypher.query_mut().push(']');\n }\n }\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n conn_ref: AuthEntryReference,\n kuzu_client: KuzuThinClient,\n analyzed_data_coll: AnalyzedDataCollection,\n}\n\nfn append_key_pattern<'a>(\n cypher: &'a mut CypherBuilder,\n key_fields: &'a [FieldSchema],\n values: impl Iterator>,\n) -> Result<()> {\n write!(cypher.query_mut(), \"{{\")?;\n let mut prefix = \"\";\n for (f, v) in std::iter::zip(key_fields.iter(), values) {\n write!(cypher.query_mut(), \"{prefix}{}: \", f.name)?;\n append_value(cypher, &f.value_type.typ, v.as_ref())?;\n prefix = \", \";\n }\n write!(cypher.query_mut(), \"}}\")?;\n Ok(())\n}\n\nfn append_set_value_fields(\n cypher: &mut CypherBuilder,\n var_name: &str,\n value_fields: &[FieldSchema],\n value_fields_idx: &[usize],\n upsert_entry: &ExportTargetUpsertEntry,\n set_self_contained_tag: bool,\n) -> Result<()> {\n let mut prefix = \" SET \";\n if set_self_contained_tag {\n write!(\n cypher.query_mut(),\n \"{prefix}{var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE\"\n )?;\n prefix = \", \";\n }\n for (value_field, value_idx) in std::iter::zip(value_fields.iter(), value_fields_idx.iter()) {\n let field_name = &value_field.name;\n write!(cypher.query_mut(), \"{prefix}{var_name}.{field_name}=\")?;\n append_value(\n cypher,\n &value_field.value_type.typ,\n &upsert_entry.value.fields[*value_idx],\n )?;\n prefix = \", \";\n }\n Ok(())\n}\n\nfn append_upsert_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const 
NODE_VAR_NAME: &str = \"n\";\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({NODE_VAR_NAME}:{label} \",\n label = data_coll.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n NODE_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n true,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_merge_node_for_rel(\n cypher: &mut CypherBuilder,\n var_name: &str,\n field_mapping: &AnalyzedGraphElementFieldMapping,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n {\n write!(\n cypher.query_mut(),\n \"MERGE ({var_name}:{label} \",\n label = field_mapping.schema.elem_type.label(),\n )?;\n append_key_pattern(\n cypher,\n &field_mapping.schema.key_fields,\n field_mapping\n .fields_input_idx\n .key\n .iter()\n .map(|idx| Cow::Borrowed(&upsert_entry.value.fields[*idx])),\n )?;\n write!(cypher.query_mut(), \")\")?;\n }\n append_set_value_fields(\n cypher,\n var_name,\n &field_mapping.schema.value_fields,\n &field_mapping.fields_input_idx.value,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut())?;\n Ok(())\n}\n\nfn append_upsert_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n upsert_entry: &ExportTargetUpsertEntry,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n const SRC_NODE_VAR_NAME: &str = \"s\";\n const TGT_NODE_VAR_NAME: &str = \"t\";\n\n let rel_info = if let Some(rel_info) = &data_coll.rel {\n rel_info\n } else {\n return Ok(());\n };\n append_merge_node_for_rel(cypher, SRC_NODE_VAR_NAME, &rel_info.source, upsert_entry)?;\n append_merge_node_for_rel(cypher, TGT_NODE_VAR_NAME, &rel_info.target, upsert_entry)?;\n {\n let rel_type = data_coll.schema.elem_type.label();\n write!(\n cypher.query_mut(),\n 
\"MERGE ({SRC_NODE_VAR_NAME})-[{REL_VAR_NAME}:{rel_type} \"\n )?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n upsert_entry\n .key\n .fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n write!(cypher.query_mut(), \"]->({TGT_NODE_VAR_NAME})\")?;\n }\n append_set_value_fields(\n cypher,\n REL_VAR_NAME,\n &data_coll.schema.value_fields,\n &data_coll.value_fields_input_idx,\n upsert_entry,\n false,\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_node(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = data_coll.schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &data_coll.schema.key_fields,\n key.fields_iter(data_coll.schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} SET {NODE_VAR_NAME}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\"\n )?;\n writeln!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_delete_rel(\n cypher: &mut CypherBuilder,\n data_coll: &AnalyzedDataCollection,\n key: &KeyValue,\n src_node_key: &KeyValue,\n tgt_node_key: &KeyValue,\n) -> Result<()> {\n const REL_VAR_NAME: &str = \"r\";\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n let rel_type = data_coll.schema.elem_type.label();\n\n write!(\n cypher.query_mut(),\n \"MATCH (:{label} \",\n label = rel.source.schema.elem_type.label()\n )?;\n let src_key_schema = &rel.source.schema.key_fields;\n append_key_pattern(\n cypher,\n src_key_schema,\n src_node_key\n .fields_iter(src_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n 
)?;\n\n write!(cypher.query_mut(), \")-[{REL_VAR_NAME}:{rel_type} \")?;\n let key_schema = &data_coll.schema.key_fields;\n append_key_pattern(\n cypher,\n key_schema,\n key.fields_iter(key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n\n write!(\n cypher.query_mut(),\n \"]->(:{label} \",\n label = rel.target.schema.elem_type.label()\n )?;\n let tgt_key_schema = &rel.target.schema.key_fields;\n append_key_pattern(\n cypher,\n tgt_key_schema,\n tgt_node_key\n .fields_iter(tgt_key_schema.len())?\n .map(|k| Cow::Owned(value::Value::from(k))),\n )?;\n write!(cypher.query_mut(), \") DELETE {REL_VAR_NAME}\")?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\nfn append_maybe_gc_node(\n cypher: &mut CypherBuilder,\n schema: &GraphElementSchema,\n key: &KeyValue,\n) -> Result<()> {\n const NODE_VAR_NAME: &str = \"n\";\n let node_label = schema.elem_type.label();\n write!(cypher.query_mut(), \"MATCH ({NODE_VAR_NAME}:{node_label} \")?;\n append_key_pattern(\n cypher,\n &schema.key_fields,\n key.fields_iter(schema.key_fields.len())?\n .map(|f| Cow::Owned(value::Value::from(f))),\n )?;\n writeln!(cypher.query_mut(), \")\")?;\n write!(\n cypher.query_mut(),\n \"WITH {NODE_VAR_NAME} WHERE NOT ({NODE_VAR_NAME})--() DELETE {NODE_VAR_NAME}\"\n )?;\n writeln!(cypher.query_mut(), \";\")?;\n Ok(())\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\ntype KuzuGraphElement = GraphElementType;\n\nstruct Factory {\n reqwest_client: reqwest::Client,\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = GraphElementDataSetupStatus;\n\n type Key = KuzuGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Kuzu\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) 
-> Result<(\n Vec>,\n Vec<(KuzuGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n fn to_kuzu_cols(fields: &[FieldSchema]) -> Result> {\n fields\n .iter()\n .map(|f| Ok((f.name.clone(), value_type_to_kuzu(&f.value_type.typ)?)))\n .collect::>>()\n }\n let data_coll_outputs: Vec> =\n std::iter::zip(data_collections, analyzed_data_colls.into_iter())\n .map(|(data_coll, analyzed)| {\n fn to_dep_table(\n field_mapping: &AnalyzedGraphElementFieldMapping,\n ) -> Result {\n Ok(ReferencedNodeTable {\n table_name: field_mapping.schema.elem_type.label().to_string(),\n key_columns: to_kuzu_cols(&field_mapping.schema.key_fields)?,\n })\n }\n let setup_key = KuzuGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&analyzed.schema.key_fields)?,\n value_columns: to_kuzu_cols(&analyzed.schema.value_fields)?,\n },\n referenced_node_tables: (analyzed.rel.as_ref())\n .map(|rel| {\n anyhow::Ok((to_dep_table(&rel.source)?, to_dep_table(&rel.target)?))\n })\n .transpose()?,\n };\n\n let export_context = ExportContext {\n conn_ref: data_coll.spec.connection.clone(),\n kuzu_client: KuzuThinClient::new(\n &context\n .auth_registry\n .get::(&data_coll.spec.connection)?,\n self.reqwest_client.clone(),\n ),\n analyzed_data_coll: analyzed,\n };\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: async move { Ok(Arc::new(export_context)) }.boxed(),\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>()?;\n let decl_output = 
std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state = SetupState {\n schema: TableColumnsSchema {\n key_columns: to_kuzu_cols(&graph_elem_schema.key_fields)?,\n value_columns: to_kuzu_cols(&graph_elem_schema.value_fields)?,\n },\n referenced_node_tables: None,\n };\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>()?;\n Ok((data_coll_outputs, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n _key: KuzuGraphElement,\n desired: Option,\n existing: CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let existing_invalidated = desired.as_ref().is_some_and(|desired| {\n existing\n .possible_versions()\n .any(|v| v.referenced_node_tables != desired.referenced_node_tables)\n });\n let actions =\n TableMainSetupAction::from_states(desired.as_ref(), &existing, existing_invalidated);\n let drop_affected_referenced_node_tables = if actions.drop_existing {\n existing\n .possible_versions()\n .flat_map(|v| &v.referenced_node_tables)\n .flat_map(|(src, tgt)| [src.table_name.clone(), tgt.table_name.clone()].into_iter())\n .collect()\n } else {\n IndexSet::new()\n };\n Ok(GraphElementDataSetupStatus {\n actions,\n referenced_node_tables: desired\n .and_then(|desired| desired.referenced_node_tables)\n .map(|(src, tgt)| (src.table_name, tgt.table_name)),\n drop_affected_referenced_node_tables,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(\n if desired.referenced_node_tables != existing.referenced_node_tables {\n SetupStateCompatibility::NotCompatible\n } else {\n check_table_compatibility(&desired.schema, &existing.schema)\n },\n )\n }\n\n fn describe_resource(&self, key: &KuzuGraphElement) -> Result {\n Ok(format!(\n \"Kuzu {} TABLE {}\",\n kuzu_table_type(&key.typ),\n key.typ.label()\n ))\n }\n\n fn 
extract_additional_key(\n &self,\n _key: &KeyValue,\n value: &FieldValues,\n export_context: &ExportContext,\n ) -> Result {\n let additional_key = if let Some(rel_info) = &export_context.analyzed_data_coll.rel {\n serde_json::to_value((\n (rel_info.source.fields_input_idx).extract_key(&value.fields)?,\n (rel_info.target.fields_input_idx).extract_key(&value.fields)?,\n ))?\n } else {\n serde_json::Value::Null\n };\n Ok(additional_key)\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut mutations_by_conn = IndexMap::new();\n for mutation in mutations.into_iter() {\n mutations_by_conn\n .entry(mutation.export_context.conn_ref.clone())\n .or_insert_with(Vec::new)\n .push(mutation);\n }\n for mutations in mutations_by_conn.into_values() {\n let kuzu_client = &mutations[0].export_context.kuzu_client;\n let mut cypher = CypherBuilder::new();\n writeln!(cypher.query_mut(), \"BEGIN TRANSACTION;\")?;\n\n let (mut rel_mutations, nodes_mutations): (Vec<_>, Vec<_>) = mutations\n .into_iter()\n .partition(|m| m.export_context.analyzed_data_coll.rel.is_some());\n\n struct NodeTableGcInfo {\n schema: Arc,\n keys: IndexSet,\n }\n fn register_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: KeyValue,\n ) {\n map.entry(schema.elem_type.clone())\n .or_insert_with(|| NodeTableGcInfo {\n schema: schema.clone(),\n keys: IndexSet::new(),\n })\n .keys\n .insert(key);\n }\n fn resolve_gc_node(\n map: &mut IndexMap,\n schema: &Arc,\n key: &KeyValue,\n ) {\n map.get_mut(&schema.elem_type)\n .map(|info| info.keys.shift_remove(key));\n }\n let mut gc_info = IndexMap::::new();\n\n // Deletes for relationships\n for rel_mutation in rel_mutations.iter_mut() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n for delete in rel_mutation.mutation.deletes.iter_mut() {\n let mut additional_keys = match delete.additional_key.take() {\n serde_json::Value::Array(keys) => 
keys,\n _ => return Err(invariance_violation()),\n };\n if additional_keys.len() != 2 {\n api_bail!(\n \"Expected additional key with 2 fields, got {}\",\n delete.additional_key\n );\n }\n let src_key = KeyValue::from_json(\n additional_keys[0].take(),\n &rel.source.schema.key_fields,\n )?;\n let tgt_key = KeyValue::from_json(\n additional_keys[1].take(),\n &rel.target.schema.key_fields,\n )?;\n append_delete_rel(&mut cypher, data_coll, &delete.key, &src_key, &tgt_key)?;\n register_gc_node(&mut gc_info, &rel.source.schema, src_key);\n register_gc_node(&mut gc_info, &rel.target.schema, tgt_key);\n }\n }\n\n for node_mutation in nodes_mutations.iter() {\n let data_coll = &node_mutation.export_context.analyzed_data_coll;\n // Deletes for nodes\n for delete in node_mutation.mutation.deletes.iter() {\n append_delete_node(&mut cypher, data_coll, &delete.key)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &delete.key);\n }\n\n // Upserts for nodes\n for upsert in node_mutation.mutation.upserts.iter() {\n append_upsert_node(&mut cypher, data_coll, upsert)?;\n resolve_gc_node(&mut gc_info, &data_coll.schema, &upsert.key);\n }\n }\n // Upserts for relationships\n for rel_mutation in rel_mutations.iter() {\n let data_coll = &rel_mutation.export_context.analyzed_data_coll;\n for upsert in rel_mutation.mutation.upserts.iter() {\n append_upsert_rel(&mut cypher, data_coll, upsert)?;\n\n let rel = data_coll.rel.as_ref().ok_or_else(invariance_violation)?;\n resolve_gc_node(\n &mut gc_info,\n &rel.source.schema,\n &(rel.source.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n resolve_gc_node(\n &mut gc_info,\n &rel.target.schema,\n &(rel.target.fields_input_idx).extract_key(&upsert.value.fields)?,\n );\n }\n }\n\n // GC orphaned nodes\n for info in gc_info.into_values() {\n for key in info.keys {\n append_maybe_gc_node(&mut cypher, &info.schema, &key)?;\n }\n }\n\n writeln!(cypher.query_mut(), \"COMMIT;\")?;\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n 
}\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n let mut changes_by_conn = IndexMap::new();\n for change in changes.into_iter() {\n changes_by_conn\n .entry(change.key.connection.clone())\n .or_insert_with(Vec::new)\n .push(change);\n }\n for (conn, changes) in changes_by_conn.into_iter() {\n let conn_spec = context.auth_registry.get::(&conn)?;\n let kuzu_client = KuzuThinClient::new(&conn_spec, self.reqwest_client.clone());\n\n let (node_changes, rel_changes): (Vec<_>, Vec<_>) =\n changes.into_iter().partition(|c| match &c.key.typ {\n ElementType::Node(_) => true,\n ElementType::Relationship(_) => false,\n });\n\n let mut partial_affected_node_tables = IndexSet::new();\n let mut cypher = CypherBuilder::new();\n // Relationships first when dropping.\n for change in rel_changes.iter().chain(node_changes.iter()) {\n if !change.setup_status.actions.drop_existing {\n continue;\n }\n append_drop_table(&mut cypher, change.setup_status, &change.key.typ)?;\n\n partial_affected_node_tables.extend(\n change\n .setup_status\n .drop_affected_referenced_node_tables\n .iter(),\n );\n if let ElementType::Node(label) = &change.key.typ {\n partial_affected_node_tables.swap_remove(label);\n }\n }\n // Nodes first when creating.\n for change in node_changes.iter().chain(rel_changes.iter()) {\n append_upsert_table(&mut cypher, change.setup_status, &change.key.typ)?;\n }\n\n for table in partial_affected_node_tables {\n append_delete_orphaned_nodes(&mut cypher, table)?;\n }\n\n kuzu_client.run_cypher(cypher).await?;\n }\n Ok(())\n }\n}\n\npub fn register(\n registry: &mut ExecutorFactoryRegistry,\n reqwest_client: reqwest::Client,\n) -> Result<()> {\n Factory { reqwest_client }.register(registry)\n}\n"], ["/cocoindex/src/setup/components.rs", "use super::{CombinedState, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::prelude::*;\nuse std::fmt::Debug;\n\npub trait State: Debug + Send + Sync {\n fn key(&self) -> 
Key;\n}\n\n#[async_trait]\npub trait SetupOperator: 'static + Send + Sync {\n type Key: Debug + Hash + Eq + Clone + Send + Sync;\n type State: State;\n type SetupState: Send + Sync + IntoIterator;\n type Context: Sync;\n\n fn describe_key(&self, key: &Self::Key) -> String;\n\n fn describe_state(&self, state: &Self::State) -> String;\n\n fn is_up_to_date(&self, current: &Self::State, desired: &Self::State) -> bool;\n\n async fn create(&self, state: &Self::State, context: &Self::Context) -> Result<()>;\n\n async fn delete(&self, key: &Self::Key, context: &Self::Context) -> Result<()>;\n\n async fn update(&self, state: &Self::State, context: &Self::Context) -> Result<()> {\n self.delete(&state.key(), context).await?;\n self.create(state, context).await\n }\n}\n\n#[derive(Debug)]\nstruct CompositeStateUpsert {\n state: S,\n already_exists: bool,\n}\n\n#[derive(Derivative)]\n#[derivative(Debug)]\npub struct SetupStatus {\n #[derivative(Debug = \"ignore\")]\n desc: D,\n keys_to_delete: IndexSet,\n states_to_upsert: Vec>,\n}\n\nimpl SetupStatus {\n pub fn create(\n desc: D,\n desired: Option,\n existing: CombinedState,\n ) -> Result {\n let existing_component_states = CombinedState {\n current: existing.current.map(|s| {\n s.into_iter()\n .map(|s| (s.key(), s))\n .collect::>()\n }),\n staging: existing\n .staging\n .into_iter()\n .map(|s| match s {\n StateChange::Delete => StateChange::Delete,\n StateChange::Upsert(s) => {\n StateChange::Upsert(s.into_iter().map(|s| (s.key(), s)).collect())\n }\n })\n .collect(),\n legacy_state_key: existing.legacy_state_key,\n };\n let mut keys_to_delete = IndexSet::new();\n let mut states_to_upsert = vec![];\n\n // Collect all existing component keys\n for c in existing_component_states.possible_versions() {\n keys_to_delete.extend(c.keys().cloned());\n }\n\n if let Some(desired_state) = desired {\n for desired_comp_state in desired_state {\n let key = desired_comp_state.key();\n\n // Remove keys that should be kept from deletion list\n 
keys_to_delete.shift_remove(&key);\n\n // Add components that need to be updated\n let is_up_to_date = existing_component_states.always_exists()\n && existing_component_states.possible_versions().all(|v| {\n v.get(&key)\n .is_some_and(|s| desc.is_up_to_date(s, &desired_comp_state))\n });\n if !is_up_to_date {\n let already_exists = existing_component_states\n .possible_versions()\n .any(|v| v.contains_key(&key));\n states_to_upsert.push(CompositeStateUpsert {\n state: desired_comp_state,\n already_exists,\n });\n }\n }\n }\n\n Ok(Self {\n desc,\n keys_to_delete,\n states_to_upsert,\n })\n }\n}\n\nimpl ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n\n for key in &self.keys_to_delete {\n result.push(setup::ChangeDescription::Action(format!(\n \"Delete {}\",\n self.desc.describe_key(key)\n )));\n }\n\n for state in &self.states_to_upsert {\n result.push(setup::ChangeDescription::Action(format!(\n \"{} {}\",\n if state.already_exists {\n \"Update\"\n } else {\n \"Create\"\n },\n self.desc.describe_state(&state.state)\n )));\n }\n\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.keys_to_delete.is_empty() && self.states_to_upsert.is_empty() {\n SetupChangeType::NoChange\n } else if self.keys_to_delete.is_empty() {\n SetupChangeType::Create\n } else if self.states_to_upsert.is_empty() {\n SetupChangeType::Delete\n } else {\n SetupChangeType::Update\n }\n }\n}\n\npub async fn apply_component_changes(\n changes: Vec<&SetupStatus>,\n context: &D::Context,\n) -> Result<()> {\n // First delete components that need to be removed\n for change in changes.iter() {\n for key in &change.keys_to_delete {\n change.desc.delete(key, context).await?;\n }\n }\n\n // Then upsert components that need to be updated\n for change in changes.iter() {\n for state in &change.states_to_upsert {\n if state.already_exists {\n change.desc.update(&state.state, context).await?;\n } else {\n change.desc.create(&state.state, 
context).await?;\n }\n }\n }\n\n Ok(())\n}\n\nimpl ResourceSetupStatus for (A, B) {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n result.extend(self.0.describe_changes());\n result.extend(self.1.describe_changes());\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.0.change_type(), self.1.change_type()) {\n (SetupChangeType::Invalid, _) | (_, SetupChangeType::Invalid) => {\n SetupChangeType::Invalid\n }\n (SetupChangeType::NoChange, b) => b,\n (a, _) => a,\n }\n }\n}\n"], ["/cocoindex/src/ops/targets/neo4j.rs", "use crate::prelude::*;\n\nuse super::shared::property_graph::*;\n\nuse crate::setup::components::{self, State, apply_component_changes};\nuse crate::setup::{ResourceSetupStatus, SetupChangeType};\nuse crate::{ops::sdk::*, setup::CombinedState};\n\nuse indoc::formatdoc;\nuse neo4rs::{BoltType, ConfigBuilder, Graph};\nuse std::fmt::Write;\nuse tokio::sync::OnceCell;\n\nconst DEFAULT_DB: &str = \"neo4j\";\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n uri: String,\n user: String,\n password: String,\n db: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n connection: spec::AuthEntryReference,\n mapping: GraphElementMapping,\n}\n\n#[derive(Debug, Deserialize)]\npub struct Declaration {\n connection: spec::AuthEntryReference,\n #[serde(flatten)]\n decl: GraphDeclaration,\n}\n\ntype Neo4jGraphElement = GraphElementType;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct GraphKey {\n uri: String,\n db: String,\n}\n\nimpl GraphKey {\n fn from_spec(spec: &ConnectionSpec) -> Self {\n Self {\n uri: spec.uri.clone(),\n db: spec.db.clone().unwrap_or_else(|| DEFAULT_DB.to_string()),\n }\n }\n}\n\nimpl retryable::IsRetryable for neo4rs::Error {\n fn is_retryable(&self) -> bool {\n match self {\n neo4rs::Error::ConnectionError => true,\n neo4rs::Error::Neo4j(e) => e.kind() == neo4rs::Neo4jErrorKind::Transient,\n _ => false,\n }\n }\n}\n\n#[derive(Default)]\npub 
struct GraphPool {\n graphs: Mutex>>>>,\n}\n\nimpl GraphPool {\n async fn get_graph(&self, spec: &ConnectionSpec) -> Result> {\n let graph_key = GraphKey::from_spec(spec);\n let cell = {\n let mut graphs = self.graphs.lock().unwrap();\n graphs.entry(graph_key).or_default().clone()\n };\n let graph = cell\n .get_or_try_init(|| async {\n let mut config_builder = ConfigBuilder::default()\n .uri(spec.uri.clone())\n .user(spec.user.clone())\n .password(spec.password.clone());\n if let Some(db) = &spec.db {\n config_builder = config_builder.db(db.clone());\n }\n anyhow::Ok(Arc::new(Graph::connect(config_builder.build()?).await?))\n })\n .await?;\n Ok(graph.clone())\n }\n\n async fn get_graph_for_key(\n &self,\n key: &Neo4jGraphElement,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let spec = auth_registry.get::(&key.connection)?;\n self.get_graph(&spec).await\n }\n}\n\npub struct ExportContext {\n connection_ref: AuthEntryReference,\n graph: Arc,\n\n create_order: u8,\n\n delete_cypher: String,\n insert_cypher: String,\n delete_before_upsert: bool,\n\n analyzed_data_coll: AnalyzedDataCollection,\n\n key_field_params: Vec,\n src_key_field_params: Vec,\n tgt_key_field_params: Vec,\n}\n\nfn json_value_to_bolt_value(value: &serde_json::Value) -> Result {\n let bolt_value = match value {\n serde_json::Value::Null => BoltType::Null(neo4rs::BoltNull),\n serde_json::Value::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n serde_json::Value::Number(v) => {\n if let Some(i) = v.as_i64() {\n BoltType::Integer(neo4rs::BoltInteger::new(i))\n } else if let Some(f) = v.as_f64() {\n BoltType::Float(neo4rs::BoltFloat::new(f))\n } else {\n anyhow::bail!(\"Unsupported JSON number: {}\", v)\n }\n }\n serde_json::Value::String(v) => BoltType::String(neo4rs::BoltString::new(v)),\n serde_json::Value::Array(v) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(json_value_to_bolt_value)\n .collect::>()?,\n }),\n serde_json::Value::Object(v) => 
BoltType::Map(neo4rs::BoltMap {\n value: v\n .into_iter()\n .map(|(k, v)| Ok((neo4rs::BoltString::new(k), json_value_to_bolt_value(v)?)))\n .collect::>()?,\n }),\n };\n Ok(bolt_value)\n}\n\nfn key_to_bolt(key: &KeyValue, schema: &schema::ValueType) -> Result {\n value_to_bolt(&key.into(), schema)\n}\n\nfn field_values_to_bolt<'a>(\n field_values: impl IntoIterator,\n schema: impl IntoIterator,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(schema, field_values)\n .map(|(schema, value)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(value, &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn mapped_field_values_to_bolt(\n fields_schema: &[schema::FieldSchema],\n fields_input_idx: &[usize],\n field_values: &FieldValues,\n) -> Result {\n let bolt_value = BoltType::Map(neo4rs::BoltMap {\n value: std::iter::zip(fields_schema.iter(), fields_input_idx.iter())\n .map(|(schema, field_idx)| {\n Ok((\n neo4rs::BoltString::new(&schema.name),\n value_to_bolt(&field_values.fields[*field_idx], &schema.value_type.typ)?,\n ))\n })\n .collect::>()?,\n });\n Ok(bolt_value)\n}\n\nfn basic_value_to_bolt(value: &BasicValue, schema: &BasicValueType) -> Result {\n let bolt_value = match value {\n BasicValue::Bytes(v) => {\n BoltType::Bytes(neo4rs::BoltBytes::new(bytes::Bytes::from_owner(v.clone())))\n }\n BasicValue::Str(v) => BoltType::String(neo4rs::BoltString::new(v)),\n BasicValue::Bool(v) => BoltType::Boolean(neo4rs::BoltBoolean::new(*v)),\n BasicValue::Int64(v) => BoltType::Integer(neo4rs::BoltInteger::new(*v)),\n BasicValue::Float64(v) => BoltType::Float(neo4rs::BoltFloat::new(*v)),\n BasicValue::Float32(v) => BoltType::Float(neo4rs::BoltFloat::new(*v as f64)),\n BasicValue::Range(v) => BoltType::List(neo4rs::BoltList {\n value: [\n BoltType::Integer(neo4rs::BoltInteger::new(v.start as i64)),\n BoltType::Integer(neo4rs::BoltInteger::new(v.end as i64)),\n ]\n .into(),\n }),\n 
BasicValue::Uuid(v) => BoltType::String(neo4rs::BoltString::new(&v.to_string())),\n BasicValue::Date(v) => BoltType::Date(neo4rs::BoltDate::from(*v)),\n BasicValue::Time(v) => BoltType::LocalTime(neo4rs::BoltLocalTime::from(*v)),\n BasicValue::LocalDateTime(v) => {\n BoltType::LocalDateTime(neo4rs::BoltLocalDateTime::from(*v))\n }\n BasicValue::OffsetDateTime(v) => BoltType::DateTime(neo4rs::BoltDateTime::from(*v)),\n BasicValue::TimeDelta(v) => BoltType::Duration(neo4rs::BoltDuration::new(\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger { value: 0 },\n neo4rs::BoltInteger {\n value: v.num_seconds(),\n },\n v.subsec_nanos().into(),\n )),\n BasicValue::Vector(v) => match schema {\n BasicValueType::Vector(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|v| basic_value_to_bolt(v, &t.element_type))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-vector type got vector value: {}\", schema),\n },\n BasicValue::Json(v) => json_value_to_bolt_value(v)?,\n BasicValue::UnionVariant { tag_id, value } => match schema {\n BasicValueType::Union(s) => {\n let typ = s\n .types\n .get(*tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"Invalid `tag_id`: {}\", tag_id))?;\n\n basic_value_to_bolt(value, typ)?\n }\n _ => anyhow::bail!(\"Non-union type got union value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nfn value_to_bolt(value: &Value, schema: &schema::ValueType) -> Result {\n let bolt_value = match value {\n Value::Null => BoltType::Null(neo4rs::BoltNull),\n Value::Basic(v) => match schema {\n ValueType::Basic(t) => basic_value_to_bolt(v, t)?,\n _ => anyhow::bail!(\"Non-basic type got basic value: {}\", schema),\n },\n Value::Struct(v) => match schema {\n ValueType::Struct(t) => field_values_to_bolt(v.fields.iter(), t.fields.iter())?,\n _ => anyhow::bail!(\"Non-struct type got struct value: {}\", schema),\n },\n Value::UTable(v) | Value::LTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n 
.map(|v| field_values_to_bolt(v.0.fields.iter(), t.row.fields.iter()))\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n Value::KTable(v) => match schema {\n ValueType::Table(t) => BoltType::List(neo4rs::BoltList {\n value: v\n .iter()\n .map(|(k, v)| {\n field_values_to_bolt(\n std::iter::once(&Into::::into(k.clone()))\n .chain(v.0.fields.iter()),\n t.row.fields.iter(),\n )\n })\n .collect::>()?,\n }),\n _ => anyhow::bail!(\"Non-table type got table value: {}\", schema),\n },\n };\n Ok(bolt_value)\n}\n\nconst CORE_KEY_PARAM_PREFIX: &str = \"key\";\nconst CORE_PROPS_PARAM: &str = \"props\";\nconst SRC_KEY_PARAM_PREFIX: &str = \"source_key\";\nconst SRC_PROPS_PARAM: &str = \"source_props\";\nconst TGT_KEY_PARAM_PREFIX: &str = \"target_key\";\nconst TGT_PROPS_PARAM: &str = \"target_props\";\nconst CORE_ELEMENT_MATCHER_VAR: &str = \"e\";\nconst SELF_CONTAINED_TAG_FIELD_NAME: &str = \"__self_contained\";\n\nimpl ExportContext {\n fn build_key_field_params_n_literal<'a>(\n param_prefix: &str,\n key_fields: impl Iterator,\n ) -> (Vec, String) {\n let (params, items): (Vec, Vec) = key_fields\n .into_iter()\n .enumerate()\n .map(|(i, name)| {\n let param = format!(\"{param_prefix}_{i}\");\n let item = format!(\"{name}: ${param}\");\n (param, item)\n })\n .unzip();\n (params, format!(\"{{{}}}\", items.into_iter().join(\", \")))\n }\n\n fn new(\n graph: Arc,\n spec: Spec,\n analyzed_data_coll: AnalyzedDataCollection,\n ) -> Result {\n let (key_field_params, key_fields_literal) = Self::build_key_field_params_n_literal(\n CORE_KEY_PARAM_PREFIX,\n analyzed_data_coll.schema.key_fields.iter().map(|f| &f.name),\n );\n let result = match spec.mapping {\n GraphElementMapping::Node(node_spec) => {\n let delete_cypher = formatdoc! 
{\"\n OPTIONAL MATCH (old_node:{label} {key_fields_literal})\n WITH old_node\n SET old_node.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n WITH old_node\n WHERE NOT (old_node)--()\n DELETE old_node\n FINISH\n \",\n label = node_spec.label,\n };\n\n let insert_cypher = formatdoc! {\"\n MERGE (new_node:{label} {key_fields_literal})\n SET new_node.{SELF_CONTAINED_TAG_FIELD_NAME} = TRUE{optional_set_props}\n FINISH\n \",\n label = node_spec.label,\n optional_set_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\", new_node += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 0,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: false,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params: vec![],\n tgt_key_field_params: vec![],\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let delete_cypher = formatdoc! {\"\n OPTIONAL MATCH (old_src)-[old_rel:{rel_type} {key_fields_literal}]->(old_tgt)\n\n DELETE old_rel\n\n WITH collect(old_src) + collect(old_tgt) AS nodes_to_check\n UNWIND nodes_to_check AS node\n WITH DISTINCT node\n WHERE NOT COALESCE(node.{SELF_CONTAINED_TAG_FIELD_NAME}, FALSE)\n AND COUNT{{ (node)--() }} = 0\n DELETE node\n\n FINISH\n \",\n rel_type = rel_spec.rel_type,\n };\n\n let analyzed_rel = analyzed_data_coll\n .rel\n .as_ref()\n .ok_or_else(invariance_violation)?;\n let analyzed_src = &analyzed_rel.source;\n let analyzed_tgt = &analyzed_rel.target;\n\n let (src_key_field_params, src_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n SRC_KEY_PARAM_PREFIX,\n analyzed_src.schema.key_fields.iter().map(|f| &f.name),\n );\n let (tgt_key_field_params, tgt_key_fields_literal) =\n Self::build_key_field_params_n_literal(\n TGT_KEY_PARAM_PREFIX,\n analyzed_tgt.schema.key_fields.iter().map(|f| &f.name),\n );\n\n let insert_cypher = formatdoc! 
{\"\n MERGE (new_src:{src_node_label} {src_key_fields_literal})\n {optional_set_src_props}\n\n MERGE (new_tgt:{tgt_node_label} {tgt_key_fields_literal})\n {optional_set_tgt_props}\n\n MERGE (new_src)-[new_rel:{rel_type} {key_fields_literal}]->(new_tgt)\n {optional_set_rel_props}\n\n FINISH\n \",\n src_node_label = rel_spec.source.label,\n optional_set_src_props = if analyzed_src.has_value_fields() {\n format!(\"SET new_src += ${SRC_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n tgt_node_label = rel_spec.target.label,\n optional_set_tgt_props = if analyzed_tgt.has_value_fields() {\n format!(\"SET new_tgt += ${TGT_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n rel_type = rel_spec.rel_type,\n optional_set_rel_props = if !analyzed_data_coll.value_fields_input_idx.is_empty() {\n format!(\"SET new_rel += ${CORE_PROPS_PARAM}\\n\")\n } else {\n \"\".to_string()\n },\n };\n Self {\n connection_ref: spec.connection,\n graph,\n create_order: 1,\n delete_cypher,\n insert_cypher,\n delete_before_upsert: true,\n analyzed_data_coll,\n key_field_params,\n src_key_field_params,\n tgt_key_field_params,\n }\n }\n };\n Ok(result)\n }\n\n fn bind_key_field_params<'a>(\n query: neo4rs::Query,\n params: &[String],\n type_val: impl Iterator,\n ) -> Result {\n let mut query = query;\n for (i, (typ, val)) in type_val.enumerate() {\n query = query.param(¶ms[i], value_to_bolt(val, typ)?);\n }\n Ok(query)\n }\n\n fn bind_rel_key_field_params(\n &self,\n query: neo4rs::Query,\n val: &KeyValue,\n ) -> Result {\n let mut query = query;\n for (i, val) in val\n .fields_iter(self.analyzed_data_coll.schema.key_fields.len())?\n .enumerate()\n {\n query = query.param(\n &self.key_field_params[i],\n key_to_bolt(\n val,\n &self.analyzed_data_coll.schema.key_fields[i].value_type.typ,\n )?,\n );\n }\n Ok(query)\n }\n\n fn add_upsert_queries(\n &self,\n upsert: &ExportTargetUpsertEntry,\n queries: &mut Vec,\n ) -> Result<()> {\n if self.delete_before_upsert {\n queries.push(\n 
self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), &upsert.key)?,\n );\n }\n\n let value = &upsert.value;\n let mut query =\n self.bind_rel_key_field_params(neo4rs::query(&self.insert_cypher), &upsert.key)?;\n\n if let Some(analyzed_rel) = &self.analyzed_data_coll.rel {\n let bind_params = |query: neo4rs::Query,\n analyzed: &AnalyzedGraphElementFieldMapping,\n key_field_params: &[String]|\n -> Result {\n let mut query = Self::bind_key_field_params(\n query,\n key_field_params,\n std::iter::zip(\n analyzed.schema.key_fields.iter(),\n analyzed.fields_input_idx.key.iter(),\n )\n .map(|(f, field_idx)| (&f.value_type.typ, &value.fields[*field_idx])),\n )?;\n if analyzed.has_value_fields() {\n query = query.param(\n SRC_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &analyzed.schema.value_fields,\n &analyzed.fields_input_idx.value,\n value,\n )?,\n );\n }\n Ok(query)\n };\n query = bind_params(query, &analyzed_rel.source, &self.src_key_field_params)?;\n query = bind_params(query, &analyzed_rel.target, &self.tgt_key_field_params)?;\n }\n\n if !self.analyzed_data_coll.value_fields_input_idx.is_empty() {\n query = query.param(\n CORE_PROPS_PARAM,\n mapped_field_values_to_bolt(\n &self.analyzed_data_coll.schema.value_fields,\n &self.analyzed_data_coll.value_fields_input_idx,\n value,\n )?,\n );\n }\n queries.push(query);\n Ok(())\n }\n\n fn add_delete_queries(\n &self,\n delete_key: &value::KeyValue,\n queries: &mut Vec,\n ) -> Result<()> {\n queries\n .push(self.bind_rel_key_field_params(neo4rs::query(&self.delete_cypher), delete_key)?);\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SetupState {\n key_field_names: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n dependent_node_labels: Vec,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n sub_components: Vec,\n}\n\nimpl SetupState {\n fn new(\n schema: &GraphElementSchema,\n index_options: &IndexOptions,\n dependent_node_labels: Vec,\n ) -> 
Result {\n let key_field_names: Vec =\n schema.key_fields.iter().map(|f| f.name.clone()).collect();\n let mut sub_components = vec![];\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::KeyConstraint {\n field_names: key_field_names.clone(),\n },\n });\n let value_field_types = schema\n .value_fields\n .iter()\n .map(|f| (f.name.as_str(), &f.value_type.typ))\n .collect::>();\n for index_def in index_options.vector_indexes.iter() {\n sub_components.push(ComponentState {\n object_label: schema.elem_type.clone(),\n index_def: IndexDef::from_vector_index_def(\n index_def,\n value_field_types\n .get(index_def.field_name.as_str())\n .ok_or_else(|| {\n api_error!(\n \"Unknown field name for vector index: {}\",\n index_def.field_name\n )\n })?,\n )?,\n });\n }\n Ok(Self {\n key_field_names,\n dependent_node_labels,\n sub_components,\n })\n }\n\n fn check_compatible(&self, existing: &Self) -> SetupStateCompatibility {\n if self.key_field_names == existing.key_field_names {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n }\n }\n}\n\nimpl IntoIterator for SetupState {\n type Item = ComponentState;\n type IntoIter = std::vec::IntoIter;\n\n fn into_iter(self) -> Self::IntoIter {\n self.sub_components.into_iter()\n }\n}\n#[derive(Debug, Default)]\nstruct DataClearAction {\n dependent_node_labels: Vec,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\nenum ComponentKind {\n KeyConstraint,\n VectorIndex,\n}\n\nimpl ComponentKind {\n fn describe(&self) -> &str {\n match self {\n ComponentKind::KeyConstraint => \"KEY CONSTRAINT\",\n ComponentKind::VectorIndex => \"VECTOR INDEX\",\n }\n }\n}\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ComponentKey {\n kind: ComponentKind,\n name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nenum IndexDef {\n KeyConstraint {\n field_names: Vec,\n },\n VectorIndex {\n field_name: String,\n metric: 
spec::VectorSimilarityMetric,\n vector_size: usize,\n },\n}\n\nimpl IndexDef {\n fn from_vector_index_def(\n index_def: &spec::VectorIndexDef,\n field_typ: &schema::ValueType,\n ) -> Result {\n Ok(Self::VectorIndex {\n field_name: index_def.field_name.clone(),\n vector_size: (match field_typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(schema)) => {\n schema.dimension\n }\n _ => None,\n })\n .ok_or_else(|| {\n api_error!(\"Vector index field must be a vector with fixed dimension\")\n })?,\n metric: index_def.metric,\n })\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\npub struct ComponentState {\n object_label: ElementType,\n index_def: IndexDef,\n}\n\nimpl components::State for ComponentState {\n fn key(&self) -> ComponentKey {\n let prefix = match &self.object_label {\n ElementType::Relationship(_) => \"r\",\n ElementType::Node(_) => \"n\",\n };\n let label = self.object_label.label();\n match &self.index_def {\n IndexDef::KeyConstraint { .. } => ComponentKey {\n kind: ComponentKind::KeyConstraint,\n name: format!(\"{prefix}__{label}__key\"),\n },\n IndexDef::VectorIndex {\n field_name, metric, ..\n } => ComponentKey {\n kind: ComponentKind::VectorIndex,\n name: format!(\"{prefix}__{label}__{field_name}__{metric}__vidx\"),\n },\n }\n }\n}\n\npub struct SetupComponentOperator {\n graph_pool: Arc,\n conn_spec: ConnectionSpec,\n}\n\n#[async_trait]\nimpl components::SetupOperator for SetupComponentOperator {\n type Key = ComponentKey;\n type State = ComponentState;\n type SetupState = SetupState;\n type Context = ();\n\n fn describe_key(&self, key: &Self::Key) -> String {\n format!(\"{} {}\", key.kind.describe(), key.name)\n }\n\n fn describe_state(&self, state: &Self::State) -> String {\n let key_desc = self.describe_key(&state.key());\n let label = state.object_label.label();\n match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n format!(\"{key_desc} ON {label} (key: {})\", field_names.join(\", \"))\n 
}\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n format!(\n \"{key_desc} ON {label} (field_name: {field_name}, vector_size: {vector_size}, metric: {metric})\",\n )\n }\n }\n }\n\n fn is_up_to_date(&self, current: &ComponentState, desired: &ComponentState) -> bool {\n current == desired\n }\n\n async fn create(&self, state: &ComponentState, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let key = state.key();\n let qualifier = CORE_ELEMENT_MATCHER_VAR;\n let matcher = state.object_label.matcher(qualifier);\n let query = neo4rs::query(&match &state.index_def {\n IndexDef::KeyConstraint { field_names } => {\n let key_type = match &state.object_label {\n ElementType::Node(_) => \"NODE\",\n ElementType::Relationship(_) => \"RELATIONSHIP\",\n };\n format!(\n \"CREATE CONSTRAINT {name} IF NOT EXISTS FOR {matcher} REQUIRE {field_names} IS {key_type} KEY\",\n name = key.name,\n field_names = build_composite_field_names(qualifier, field_names),\n )\n }\n IndexDef::VectorIndex {\n field_name,\n metric,\n vector_size,\n } => {\n formatdoc! 
{\"\n CREATE VECTOR INDEX {name} IF NOT EXISTS\n FOR {matcher} ON {qualifier}.{field_name}\n OPTIONS {{\n indexConfig: {{\n `vector.dimensions`: {vector_size},\n `vector.similarity_function`: '{metric}'\n }}\n }}\",\n name = key.name,\n }\n }\n });\n Ok(graph.run(query).await?)\n }\n\n async fn delete(&self, key: &ComponentKey, _context: &Self::Context) -> Result<()> {\n let graph = self.graph_pool.get_graph(&self.conn_spec).await?;\n let query = neo4rs::query(&format!(\n \"DROP {kind} {name} IF EXISTS\",\n kind = match key.kind {\n ComponentKind::KeyConstraint => \"CONSTRAINT\",\n ComponentKind::VectorIndex => \"INDEX\",\n },\n name = key.name,\n ));\n Ok(graph.run(query).await?)\n }\n}\n\nfn build_composite_field_names(qualifier: &str, field_names: &[String]) -> String {\n let strs = field_names\n .iter()\n .map(|name| format!(\"{qualifier}.{name}\"))\n .join(\", \");\n if field_names.len() == 1 {\n strs\n } else {\n format!(\"({strs})\")\n }\n}\n#[derive(Debug)]\npub struct GraphElementDataSetupStatus {\n data_clear: Option,\n change_type: SetupChangeType,\n}\n\nimpl GraphElementDataSetupStatus {\n fn new(desired_state: Option<&SetupState>, existing: &CombinedState) -> Self {\n let mut data_clear: Option = None;\n for v in existing.possible_versions() {\n if desired_state.as_ref().is_none_or(|desired| {\n desired.check_compatible(v) == SetupStateCompatibility::NotCompatible\n }) {\n data_clear\n .get_or_insert_default()\n .dependent_node_labels\n .extend(v.dependent_node_labels.iter().cloned());\n }\n }\n\n let change_type = match (desired_state, existing.possible_versions().next()) {\n (Some(_), Some(_)) => {\n if data_clear.is_none() {\n SetupChangeType::NoChange\n } else {\n SetupChangeType::Update\n }\n }\n (Some(_), None) => SetupChangeType::Create,\n (None, Some(_)) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n };\n\n Self {\n data_clear,\n change_type,\n }\n }\n}\n\nimpl ResourceSetupStatus for GraphElementDataSetupStatus {\n 
fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if let Some(data_clear) = &self.data_clear {\n let mut desc = \"Clear data\".to_string();\n if !data_clear.dependent_node_labels.is_empty() {\n write!(\n &mut desc,\n \"; dependents {}\",\n data_clear\n .dependent_node_labels\n .iter()\n .map(|l| format!(\"{}\", ElementType::Node(l.clone())))\n .join(\", \")\n )\n .unwrap();\n }\n result.push(setup::ChangeDescription::Action(desc));\n }\n result\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.change_type\n }\n}\n\nasync fn clear_graph_element_data(\n graph: &Graph,\n key: &Neo4jGraphElement,\n is_self_contained: bool,\n) -> Result<()> {\n let var_name = CORE_ELEMENT_MATCHER_VAR;\n let matcher = key.typ.matcher(var_name);\n let query_string = match key.typ {\n ElementType::Node(_) => {\n let optional_reset_self_contained = if is_self_contained {\n formatdoc! {\"\n WITH {var_name}\n SET {var_name}.{SELF_CONTAINED_TAG_FIELD_NAME} = NULL\n \"}\n } else {\n \"\".to_string()\n };\n formatdoc! {\"\n CALL {{\n MATCH {matcher}\n {optional_reset_self_contained}\n WITH {var_name} WHERE NOT ({var_name})--() DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n ElementType::Relationship(_) => {\n formatdoc! 
{\"\n CALL {{\n MATCH {matcher} WITH {var_name} DELETE {var_name}\n }} IN TRANSACTIONS\n \"}\n }\n };\n let delete_query = neo4rs::query(&query_string);\n graph.run(delete_query).await?;\n Ok(())\n}\n\n/// Factory for Neo4j relationships\npub struct Factory {\n graph_pool: Arc,\n}\n\nimpl Factory {\n pub fn new() -> Self {\n Self {\n graph_pool: Arc::default(),\n }\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = Declaration;\n type SetupState = SetupState;\n type SetupStatus = (\n GraphElementDataSetupStatus,\n components::SetupStatus,\n );\n type Key = Neo4jGraphElement;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Neo4j\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Neo4jGraphElement, SetupState)>,\n )> {\n let (analyzed_data_colls, declared_graph_elements) = analyze_graph_mappings(\n data_collections\n .iter()\n .map(|d| DataCollectionGraphMappingInput {\n auth_ref: &d.spec.connection,\n mapping: &d.spec.mapping,\n index_options: &d.index_options,\n key_fields_schema: d.key_fields_schema.clone(),\n value_fields_schema: d.value_fields_schema.clone(),\n }),\n declarations.iter().map(|d| (&d.connection, &d.decl)),\n )?;\n let data_coll_output = std::iter::zip(data_collections, analyzed_data_colls)\n .map(|(data_coll, analyzed)| {\n let setup_key = Neo4jGraphElement {\n connection: data_coll.spec.connection.clone(),\n typ: analyzed.schema.elem_type.clone(),\n };\n let desired_setup_state = SetupState::new(\n &analyzed.schema,\n &data_coll.index_options,\n analyzed\n .dependent_node_labels()\n .into_iter()\n .map(|s| s.to_string())\n .collect(),\n )?;\n\n let conn_spec = context\n .auth_registry\n .get::(&data_coll.spec.connection)?;\n let factory = self.clone();\n let export_context = async move {\n Ok(Arc::new(ExportContext::new(\n factory.graph_pool.get_graph(&conn_spec).await?,\n 
data_coll.spec,\n analyzed,\n )?))\n }\n .boxed();\n\n Ok(TypedExportDataCollectionBuildOutput {\n export_context,\n setup_key,\n desired_setup_state,\n })\n })\n .collect::>>()?;\n let decl_output = std::iter::zip(declarations, declared_graph_elements)\n .map(|(decl, graph_elem_schema)| {\n let setup_state =\n SetupState::new(&graph_elem_schema, &decl.decl.index_options, vec![])?;\n let setup_key = GraphElementType {\n connection: decl.connection,\n typ: graph_elem_schema.elem_type.clone(),\n };\n Ok((setup_key, setup_state))\n })\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: Neo4jGraphElement,\n desired: Option,\n existing: CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result {\n let conn_spec = flow_instance_ctx\n .auth_registry\n .get::(&key.connection)?;\n let data_status = GraphElementDataSetupStatus::new(desired.as_ref(), &existing);\n let components = components::SetupStatus::create(\n SetupComponentOperator {\n graph_pool: self.graph_pool.clone(),\n conn_spec,\n },\n desired,\n existing,\n )?;\n Ok((data_status, components))\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(desired.check_compatible(existing))\n }\n\n fn describe_resource(&self, key: &Neo4jGraphElement) -> Result {\n Ok(format!(\"Neo4j {}\", key.typ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mut muts_by_graph = HashMap::new();\n for mut_with_ctx in mutations.iter() {\n muts_by_graph\n .entry(&mut_with_ctx.export_context.connection_ref)\n .or_insert_with(Vec::new)\n .push(mut_with_ctx);\n }\n let retry_options = retryable::RetryOptions::default();\n for muts in muts_by_graph.values_mut() {\n muts.sort_by_key(|m| m.export_context.create_order);\n let graph = &muts[0].export_context.graph;\n retryable::run(\n async || {\n let mut queries = vec![];\n for mut_with_ctx in muts.iter() {\n let export_ctx = 
&mut_with_ctx.export_context;\n for upsert in mut_with_ctx.mutation.upserts.iter() {\n export_ctx.add_upsert_queries(upsert, &mut queries)?;\n }\n }\n for mut_with_ctx in muts.iter().rev() {\n let export_ctx = &mut_with_ctx.export_context;\n for deletion in mut_with_ctx.mutation.deletes.iter() {\n export_ctx.add_delete_queries(&deletion.key, &mut queries)?;\n }\n }\n let mut txn = graph.start_txn().await?;\n txn.run_queries(queries).await?;\n txn.commit().await?;\n retryable::Ok(())\n },\n &retry_options,\n )\n .await\n .map_err(Into::::into)?\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n changes: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Relationships first, then nodes, as relationships need to be deleted before nodes they referenced.\n let mut relationship_types = IndexSet::<&Neo4jGraphElement>::new();\n let mut node_labels = IndexSet::<&Neo4jGraphElement>::new();\n let mut dependent_node_labels = IndexSet::::new();\n\n let mut components = vec![];\n for change in changes.iter() {\n if let Some(data_clear) = &change.setup_status.0.data_clear {\n match &change.key.typ {\n ElementType::Relationship(_) => {\n relationship_types.insert(&change.key);\n for label in &data_clear.dependent_node_labels {\n dependent_node_labels.insert(Neo4jGraphElement {\n connection: change.key.connection.clone(),\n typ: ElementType::Node(label.clone()),\n });\n }\n }\n ElementType::Node(_) => {\n node_labels.insert(&change.key);\n }\n }\n }\n components.push(&change.setup_status.1);\n }\n\n // Relationships have no dependency, so can be cleared first.\n for rel_type in relationship_types.into_iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(rel_type, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, rel_type, true).await?;\n }\n // Clear standalone nodes, which is simpler than dependent nodes.\n for node_label in node_labels.iter() {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n 
.await?;\n clear_graph_element_data(&graph, node_label, true).await?;\n }\n // Clear dependent nodes if they're not covered by standalone nodes.\n for node_label in dependent_node_labels.iter() {\n if !node_labels.contains(node_label) {\n let graph = self\n .graph_pool\n .get_graph_for_key(node_label, &context.auth_registry)\n .await?;\n clear_graph_element_data(&graph, node_label, false).await?;\n }\n }\n\n apply_component_changes(components, &()).await?;\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/driver.rs", "use crate::{\n lib_context::{FlowContext, FlowExecutionContext, LibSetupContext},\n ops::{\n get_optional_executor_factory,\n interface::{ExportTargetFactory, FlowInstanceContext},\n },\n prelude::*,\n};\n\nuse sqlx::PgPool;\nuse std::{\n fmt::{Debug, Display},\n str::FromStr,\n};\n\nuse super::{AllSetupStates, GlobalSetupStatus};\nuse super::{\n CombinedState, DesiredMode, ExistingMode, FlowSetupState, FlowSetupStatus, ObjectSetupStatus,\n ObjectStatus, ResourceIdentifier, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType,\n StateChange, TargetSetupState, db_metadata,\n};\nuse crate::execution::db_tracking_setup;\nuse crate::ops::interface::ExecutorFactory;\nuse std::fmt::Write;\n\nenum MetadataRecordType {\n FlowVersion,\n FlowMetadata,\n TrackingTable,\n Target(String),\n}\n\nimpl Display for MetadataRecordType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n MetadataRecordType::FlowVersion => f.write_str(db_metadata::FLOW_VERSION_RESOURCE_TYPE),\n MetadataRecordType::FlowMetadata => write!(f, \"FlowMetadata\"),\n MetadataRecordType::TrackingTable => write!(f, \"TrackingTable\"),\n MetadataRecordType::Target(target_id) => write!(f, \"Target:{target_id}\"),\n }\n }\n}\n\nimpl std::str::FromStr for MetadataRecordType {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result {\n if s == db_metadata::FLOW_VERSION_RESOURCE_TYPE {\n Ok(Self::FlowVersion)\n } else if s == \"FlowMetadata\" {\n 
Ok(Self::FlowMetadata)\n } else if s == \"TrackingTable\" {\n Ok(Self::TrackingTable)\n } else if let Some(target_id) = s.strip_prefix(\"Target:\") {\n Ok(Self::Target(target_id.to_string()))\n } else {\n anyhow::bail!(\"Invalid MetadataRecordType string: {}\", s)\n }\n }\n}\n\nfn from_metadata_record(\n state: Option,\n staging_changes: sqlx::types::Json>>,\n legacy_state_key: Option,\n) -> Result> {\n let current: Option = state.map(serde_json::from_value).transpose()?;\n let staging: Vec> = (staging_changes.0.into_iter())\n .map(|sc| -> Result<_> {\n Ok(match sc {\n StateChange::Upsert(v) => StateChange::Upsert(serde_json::from_value(v)?),\n StateChange::Delete => StateChange::Delete,\n })\n })\n .collect::>()?;\n Ok(CombinedState {\n current,\n staging,\n legacy_state_key,\n })\n}\n\nfn get_export_target_factory(\n target_type: &str,\n) -> Option> {\n match get_optional_executor_factory(target_type) {\n Some(ExecutorFactory::ExportTarget(factory)) => Some(factory),\n _ => None,\n }\n}\n\npub async fn get_existing_setup_state(pool: &PgPool) -> Result> {\n let setup_metadata_records = db_metadata::read_setup_metadata(pool).await?;\n\n let setup_metadata_records = if let Some(records) = setup_metadata_records {\n records\n } else {\n return Ok(AllSetupStates::default());\n };\n\n // Group setup metadata records by flow name\n let setup_metadata_records = setup_metadata_records.into_iter().fold(\n BTreeMap::>::new(),\n |mut acc, record| {\n acc.entry(record.flow_name.clone())\n .or_default()\n .push(record);\n acc\n },\n );\n\n let flows = setup_metadata_records\n .into_iter()\n .map(|(flow_name, metadata_records)| -> anyhow::Result<_> {\n let mut flow_ss = FlowSetupState::default();\n for metadata_record in metadata_records {\n let state = metadata_record.state;\n let staging_changes = metadata_record.staging_changes;\n match MetadataRecordType::from_str(&metadata_record.resource_type)? 
{\n MetadataRecordType::FlowVersion => {\n flow_ss.seen_flow_metadata_version =\n db_metadata::parse_flow_version(&state);\n }\n MetadataRecordType::FlowMetadata => {\n flow_ss.metadata = from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::TrackingTable => {\n flow_ss.tracking_table =\n from_metadata_record(state, staging_changes, None)?;\n }\n MetadataRecordType::Target(target_type) => {\n let normalized_key = {\n if let Some(factory) = get_export_target_factory(&target_type) {\n factory.normalize_setup_key(&metadata_record.key)?\n } else {\n metadata_record.key.clone()\n }\n };\n let combined_state = from_metadata_record(\n state,\n staging_changes,\n (normalized_key != metadata_record.key).then_some(metadata_record.key),\n )?;\n flow_ss.targets.insert(\n super::ResourceIdentifier {\n key: normalized_key,\n target_kind: target_type,\n },\n combined_state,\n );\n }\n }\n }\n Ok((flow_name, flow_ss))\n })\n .collect::>()?;\n\n Ok(AllSetupStates {\n has_metadata_table: true,\n flows,\n })\n}\n\nfn diff_state(\n existing_state: Option<&E>,\n desired_state: Option<&D>,\n diff: impl Fn(Option<&E>, &D) -> Option>,\n) -> Option>\nwhere\n E: PartialEq,\n{\n match (existing_state, desired_state) {\n (None, None) => None,\n (Some(_), None) => Some(StateChange::Delete),\n (existing_state, Some(desired_state)) => {\n if existing_state.map(|e| e == desired_state).unwrap_or(false) {\n None\n } else {\n diff(existing_state, desired_state)\n }\n }\n }\n}\n\nfn to_object_status(existing: Option, desired: Option) -> Option {\n Some(match (&existing, &desired) {\n (Some(_), None) => ObjectStatus::Deleted,\n (None, Some(_)) => ObjectStatus::New,\n (Some(_), Some(_)) => ObjectStatus::Existing,\n (None, None) => return None,\n })\n}\n\n#[derive(Debug, Default)]\nstruct GroupedResourceStates {\n desired: Option,\n existing: CombinedState,\n}\n\nfn group_resource_states<'a>(\n desired: impl Iterator,\n existing: impl Iterator)>,\n) -> Result> {\n let mut 
grouped: IndexMap<&'a ResourceIdentifier, GroupedResourceStates> = desired\n .into_iter()\n .map(|(key, state)| {\n (\n key,\n GroupedResourceStates {\n desired: Some(state.clone()),\n existing: CombinedState::default(),\n },\n )\n })\n .collect();\n for (key, state) in existing {\n let entry = grouped.entry(key);\n if state.current.is_some() {\n if let indexmap::map::Entry::Occupied(entry) = &entry {\n if entry.get().existing.current.is_some() {\n bail!(\"Duplicate existing state for key: {}\", entry.key());\n }\n }\n }\n let entry = entry.or_default();\n if let Some(current) = &state.current {\n entry.existing.current = Some(current.clone());\n }\n if let Some(legacy_state_key) = &state.legacy_state_key {\n if entry\n .existing\n .legacy_state_key\n .as_ref()\n .is_some_and(|v| v != legacy_state_key)\n {\n warn!(\n \"inconsistent legacy key: {:?}, {:?}\",\n key, entry.existing.legacy_state_key\n );\n }\n entry.existing.legacy_state_key = Some(legacy_state_key.clone());\n }\n for s in state.staging.iter() {\n match s {\n StateChange::Upsert(v) => {\n entry.existing.staging.push(StateChange::Upsert(v.clone()))\n }\n StateChange::Delete => entry.existing.staging.push(StateChange::Delete),\n }\n }\n }\n Ok(grouped)\n}\n\npub async fn check_flow_setup_status(\n desired_state: Option<&FlowSetupState>,\n existing_state: Option<&FlowSetupState>,\n flow_instance_ctx: &Arc,\n) -> Result {\n let metadata_change = diff_state(\n existing_state.map(|e| &e.metadata),\n desired_state.map(|d| &d.metadata),\n |_, desired_state| Some(StateChange::Upsert(desired_state.clone())),\n );\n\n let new_source_ids = desired_state\n .iter()\n .flat_map(|d| d.metadata.sources.values().map(|v| v.source_id))\n .collect::>();\n let tracking_table_change = db_tracking_setup::TrackingTableSetupStatus::new(\n desired_state.map(|d| &d.tracking_table),\n &existing_state\n .map(|e| Cow::Borrowed(&e.tracking_table))\n .unwrap_or_default(),\n (existing_state.iter())\n .flat_map(|state| 
state.metadata.possible_versions())\n .flat_map(|metadata| {\n metadata\n .sources\n .values()\n .map(|v| v.source_id)\n .filter(|id| !new_source_ids.contains(id))\n })\n .collect::>()\n .into_iter()\n .collect(),\n );\n\n let mut target_resources = Vec::new();\n let mut unknown_resources = Vec::new();\n\n let grouped_target_resources = group_resource_states(\n desired_state.iter().flat_map(|d| d.targets.iter()),\n existing_state.iter().flat_map(|e| e.targets.iter()),\n )?;\n for (resource_id, v) in grouped_target_resources.into_iter() {\n let factory = match get_export_target_factory(&resource_id.target_kind) {\n Some(factory) => factory,\n None => {\n unknown_resources.push(resource_id.clone());\n continue;\n }\n };\n let state = v.desired.clone();\n let target_state = v\n .desired\n .and_then(|state| (!state.common.setup_by_user).then_some(state.state));\n let existing_without_setup_by_user = CombinedState {\n current: v\n .existing\n .current\n .and_then(|s| s.state_unless_setup_by_user()),\n staging: v\n .existing\n .staging\n .into_iter()\n .filter_map(|s| match s {\n StateChange::Upsert(s) => {\n s.state_unless_setup_by_user().map(StateChange::Upsert)\n }\n StateChange::Delete => Some(StateChange::Delete),\n })\n .collect(),\n legacy_state_key: v.existing.legacy_state_key.clone(),\n };\n let never_setup_by_sys = target_state.is_none()\n && existing_without_setup_by_user.current.is_none()\n && existing_without_setup_by_user.staging.is_empty();\n let setup_status = if never_setup_by_sys {\n None\n } else {\n Some(\n factory\n .check_setup_status(\n &resource_id.key,\n target_state,\n existing_without_setup_by_user,\n flow_instance_ctx.clone(),\n )\n .await?,\n )\n };\n target_resources.push(ResourceSetupInfo {\n key: resource_id.clone(),\n state,\n description: factory.describe_resource(&resource_id.key)?,\n setup_status,\n legacy_key: v\n .existing\n .legacy_state_key\n .map(|legacy_state_key| ResourceIdentifier {\n target_kind: 
resource_id.target_kind.clone(),\n key: legacy_state_key,\n }),\n });\n }\n Ok(FlowSetupStatus {\n status: to_object_status(existing_state, desired_state),\n seen_flow_metadata_version: existing_state.and_then(|s| s.seen_flow_metadata_version),\n metadata_change,\n tracking_table: tracking_table_change.map(|c| c.into_setup_info()),\n target_resources,\n unknown_resources,\n })\n}\n\nstruct ResourceSetupChangeItem<'a, K: 'a, C: ResourceSetupStatus> {\n key: &'a K,\n setup_status: &'a C,\n}\n\nasync fn maybe_update_resource_setup<\n 'a,\n K: 'a,\n S: 'a,\n C: ResourceSetupStatus,\n ChangeApplierResultFut: Future>,\n>(\n resource_kind: &str,\n write: &mut (dyn std::io::Write + Send),\n resources: impl Iterator>,\n apply_change: impl FnOnce(Vec>) -> ChangeApplierResultFut,\n) -> Result<()> {\n let mut changes = Vec::new();\n for resource in resources {\n if let Some(setup_status) = &resource.setup_status {\n if setup_status.change_type() != SetupChangeType::NoChange {\n changes.push(ResourceSetupChangeItem {\n key: &resource.key,\n setup_status,\n });\n writeln!(write, \"{}:\", resource.description)?;\n for change in setup_status.describe_changes() {\n match change {\n setup::ChangeDescription::Action(action) => {\n writeln!(write, \" - {action}\")?;\n }\n setup::ChangeDescription::Note(_) => {}\n }\n }\n }\n }\n }\n if !changes.is_empty() {\n write!(write, \"Pushing change for {resource_kind}...\")?;\n apply_change(changes).await?;\n writeln!(write, \"DONE\")?;\n }\n Ok(())\n}\n\nasync fn apply_changes_for_flow(\n write: &mut (dyn std::io::Write + Send),\n flow_ctx: &FlowContext,\n flow_status: &FlowSetupStatus,\n existing_setup_state: &mut Option>,\n pool: &PgPool,\n) -> Result<()> {\n let Some(status) = flow_status.status else {\n return Ok(());\n };\n let verb = match status {\n ObjectStatus::New => \"Creating\",\n ObjectStatus::Deleted => \"Deleting\",\n ObjectStatus::Existing => \"Updating resources for \",\n _ => bail!(\"invalid flow status\"),\n };\n 
write!(write, \"\\n{verb} flow {}:\\n\", flow_ctx.flow_name())?;\n\n let mut update_info =\n HashMap::::new();\n\n if let Some(metadata_change) = &flow_status.metadata_change {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::FlowMetadata.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(metadata_change.desired_state(), None)?,\n );\n }\n if let Some(tracking_table) = &flow_status.tracking_table {\n if tracking_table\n .setup_status\n .as_ref()\n .map(|c| c.change_type() != SetupChangeType::NoChange)\n .unwrap_or_default()\n {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::TrackingTable.to_string(),\n serde_json::Value::Null,\n ),\n db_metadata::StateUpdateInfo::new(tracking_table.state.as_ref(), None)?,\n );\n }\n }\n\n for target_resource in &flow_status.target_resources {\n update_info.insert(\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(target_resource.key.target_kind.clone()).to_string(),\n target_resource.key.key.clone(),\n ),\n db_metadata::StateUpdateInfo::new(\n target_resource.state.as_ref(),\n target_resource.legacy_key.as_ref().map(|k| {\n db_metadata::ResourceTypeKey::new(\n MetadataRecordType::Target(k.target_kind.clone()).to_string(),\n k.key.clone(),\n )\n }),\n )?,\n );\n }\n\n let new_version_id = db_metadata::stage_changes_for_flow(\n flow_ctx.flow_name(),\n flow_status.seen_flow_metadata_version,\n &update_info,\n pool,\n )\n .await?;\n\n if let Some(tracking_table) = &flow_status.tracking_table {\n maybe_update_resource_setup(\n \"tracking table\",\n write,\n std::iter::once(tracking_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n }\n\n let mut setup_status_by_target_kind = IndexMap::<&str, Vec<_>>::new();\n for target_resource in &flow_status.target_resources {\n setup_status_by_target_kind\n .entry(target_resource.key.target_kind.as_str())\n .or_default()\n .push(target_resource);\n }\n 
for (target_kind, resources) in setup_status_by_target_kind.into_iter() {\n maybe_update_resource_setup(\n target_kind,\n write,\n resources.into_iter(),\n |setup_status| async move {\n let factory = get_export_target_factory(target_kind).ok_or_else(|| {\n anyhow::anyhow!(\"No factory found for target kind: {}\", target_kind)\n })?;\n factory\n .apply_setup_changes(\n setup_status\n .into_iter()\n .map(|s| interface::ResourceSetupChangeItem {\n key: &s.key.key,\n setup_status: s.setup_status.as_ref(),\n })\n .collect(),\n flow_ctx.flow.flow_instance_ctx.clone(),\n )\n .await?;\n Ok(())\n },\n )\n .await?;\n }\n\n let is_deletion = status == ObjectStatus::Deleted;\n db_metadata::commit_changes_for_flow(\n flow_ctx.flow_name(),\n new_version_id,\n &update_info,\n is_deletion,\n pool,\n )\n .await?;\n if is_deletion {\n *existing_setup_state = None;\n } else {\n let (existing_metadata, existing_tracking_table, existing_targets) =\n match std::mem::take(existing_setup_state) {\n Some(s) => (Some(s.metadata), Some(s.tracking_table), s.targets),\n None => Default::default(),\n };\n let metadata = CombinedState::from_change(\n existing_metadata,\n flow_status\n .metadata_change\n .as_ref()\n .map(|v| v.desired_state()),\n );\n let tracking_table = CombinedState::from_change(\n existing_tracking_table,\n flow_status.tracking_table.as_ref().map(|c| {\n c.setup_status\n .as_ref()\n .and_then(|c| c.desired_state.as_ref())\n }),\n );\n let mut targets = existing_targets;\n for target_resource in &flow_status.target_resources {\n match &target_resource.state {\n Some(state) => {\n targets.insert(\n target_resource.key.clone(),\n CombinedState::from_desired(state.clone()),\n );\n }\n None => {\n targets.shift_remove(&target_resource.key);\n }\n }\n }\n *existing_setup_state = Some(setup::FlowSetupState {\n metadata,\n tracking_table,\n seen_flow_metadata_version: Some(new_version_id),\n targets,\n });\n }\n\n writeln!(write, \"Done for flow {}\", flow_ctx.flow_name())?;\n 
Ok(())\n}\n\nasync fn apply_global_changes(\n write: &mut (dyn std::io::Write + Send),\n setup_status: &GlobalSetupStatus,\n all_setup_states: &mut AllSetupStates,\n) -> Result<()> {\n maybe_update_resource_setup(\n \"metadata table\",\n write,\n std::iter::once(&setup_status.metadata_table),\n |setup_status| setup_status[0].setup_status.apply_change(),\n )\n .await?;\n\n if setup_status\n .metadata_table\n .setup_status\n .as_ref()\n .is_some_and(|c| c.change_type() == SetupChangeType::Create)\n {\n all_setup_states.has_metadata_table = true;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum FlowSetupChangeAction {\n Setup,\n Drop,\n}\npub struct SetupChangeBundle {\n pub action: FlowSetupChangeAction,\n pub flow_names: Vec,\n}\n\nimpl SetupChangeBundle {\n async fn get_flow_setup_status<'a>(\n setup_ctx: &LibSetupContext,\n flow_ctx: &'a FlowContext,\n flow_exec_ctx: &'a FlowExecutionContext,\n action: &FlowSetupChangeAction,\n buffer: &'a mut Option,\n ) -> Result<&'a FlowSetupStatus> {\n let result = match action {\n FlowSetupChangeAction::Setup => &flow_exec_ctx.setup_status,\n FlowSetupChangeAction::Drop => {\n let existing_state = setup_ctx.all_setup_states.flows.get(flow_ctx.flow_name());\n buffer.insert(\n check_flow_setup_status(None, existing_state, &flow_ctx.flow.flow_instance_ctx)\n .await?,\n )\n }\n };\n Ok(result)\n }\n\n pub async fn describe(&self, lib_context: &LibContext) -> Result<(String, bool)> {\n let mut text = String::new();\n let mut is_up_to_date = true;\n\n let setup_ctx = lib_context\n .require_persistence_ctx()?\n .setup_ctx\n .read()\n .await;\n let setup_ctx = &*setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup {\n is_up_to_date = is_up_to_date && setup_ctx.global_setup_status.is_up_to_date();\n write!(&mut text, \"{}\", setup_ctx.global_setup_status)?;\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n 
.ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().read().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n\n is_up_to_date = is_up_to_date && setup_status.is_up_to_date();\n write!(\n &mut text,\n \"{}\",\n setup::FormattedFlowSetupStatus(flow_name, setup_status)\n )?;\n }\n Ok((text, is_up_to_date))\n }\n\n pub async fn apply(\n &self,\n lib_context: &LibContext,\n write: &mut (dyn std::io::Write + Send),\n ) -> Result<()> {\n let persistence_ctx = lib_context.require_persistence_ctx()?;\n let mut setup_ctx = persistence_ctx.setup_ctx.write().await;\n let setup_ctx = &mut *setup_ctx;\n\n if self.action == FlowSetupChangeAction::Setup\n && !setup_ctx.global_setup_status.is_up_to_date()\n {\n apply_global_changes(\n write,\n &setup_ctx.global_setup_status,\n &mut setup_ctx.all_setup_states,\n )\n .await?;\n setup_ctx.global_setup_status =\n GlobalSetupStatus::from_setup_states(&setup_ctx.all_setup_states);\n }\n\n for flow_name in &self.flow_names {\n let flow_ctx = {\n let flows = lib_context.flows.lock().unwrap();\n flows\n .get(flow_name)\n .ok_or_else(|| anyhow::anyhow!(\"Flow instance not found: {flow_name}\"))?\n .clone()\n };\n let mut flow_exec_ctx = flow_ctx.get_execution_ctx_for_setup().write().await;\n\n let mut setup_status_buffer = None;\n let setup_status = Self::get_flow_setup_status(\n setup_ctx,\n &flow_ctx,\n &flow_exec_ctx,\n &self.action,\n &mut setup_status_buffer,\n )\n .await?;\n if setup_status.is_up_to_date() {\n continue;\n }\n\n let mut flow_states = setup_ctx.all_setup_states.flows.remove(flow_name);\n apply_changes_for_flow(\n write,\n &flow_ctx,\n setup_status,\n &mut flow_states,\n &persistence_ctx.builtin_db_pool,\n )\n .await?;\n\n flow_exec_ctx\n .update_setup_state(&flow_ctx.flow, 
flow_states.as_ref())\n .await?;\n if let Some(flow_states) = flow_states {\n setup_ctx\n .all_setup_states\n .flows\n .insert(flow_name.to_string(), flow_states);\n }\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/targets/qdrant.rs", "use crate::ops::sdk::*;\nuse crate::prelude::*;\n\nuse std::fmt::Display;\n\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::setup;\nuse qdrant_client::Qdrant;\nuse qdrant_client::qdrant::{\n CreateCollectionBuilder, DeletePointsBuilder, DenseVector, Distance, MultiDenseVector,\n MultiVectorComparator, MultiVectorConfigBuilder, NamedVectors, PointId, PointStruct,\n PointsIdsList, UpsertPointsBuilder, Value as QdrantValue, Vector as QdrantVector,\n VectorParamsBuilder, VectorsConfigBuilder,\n};\n\nconst DEFAULT_VECTOR_SIMILARITY_METRIC: spec::VectorSimilarityMetric =\n spec::VectorSimilarityMetric::CosineSimilarity;\nconst DEFAULT_URL: &str = \"http://localhost:6334/\";\n\n////////////////////////////////////////////////////////////\n// Public Types\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Deserialize, Clone)]\npub struct ConnectionSpec {\n grpc_url: String,\n api_key: Option,\n}\n\n#[derive(Debug, Deserialize, Clone)]\nstruct Spec {\n connection: Option>,\n collection_name: String,\n}\n\n////////////////////////////////////////////////////////////\n// Common\n////////////////////////////////////////////////////////////\n\nstruct FieldInfo {\n field_schema: schema::FieldSchema,\n vector_shape: Option,\n}\n\nenum VectorShape {\n Vector(usize),\n MultiVector(usize),\n}\n\nimpl VectorShape {\n fn vector_size(&self) -> usize {\n match self {\n VectorShape::Vector(size) => *size,\n VectorShape::MultiVector(size) => *size,\n }\n }\n\n fn multi_vector_comparator(&self) -> Option {\n match self {\n VectorShape::MultiVector(_) => Some(MultiVectorComparator::MaxSim),\n _ => None,\n }\n }\n}\n\nfn parse_vector_schema_shape(vector_schema: &schema::VectorTypeSchema) -> Option {\n match 
&*vector_schema.element_type {\n schema::BasicValueType::Float32\n | schema::BasicValueType::Float64\n | schema::BasicValueType::Int64 => vector_schema.dimension.map(VectorShape::Vector),\n\n schema::BasicValueType::Vector(nested_vector_schema) => {\n match parse_vector_schema_shape(nested_vector_schema) {\n Some(VectorShape::Vector(dim)) => Some(VectorShape::MultiVector(dim)),\n _ => None,\n }\n }\n _ => None,\n }\n}\n\nfn parse_vector_shape(typ: &schema::ValueType) -> Option {\n match typ {\n schema::ValueType::Basic(schema::BasicValueType::Vector(vector_schema)) => {\n parse_vector_schema_shape(vector_schema)\n }\n _ => None,\n }\n}\n\nfn encode_dense_vector(v: &BasicValue) -> Result {\n let vec = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(|elem| {\n Ok(match elem {\n BasicValue::Float32(f) => *f,\n BasicValue::Float64(f) => *f as f32,\n BasicValue::Int64(i) => *i as f32,\n _ => bail!(\"Unsupported vector type: {:?}\", elem.kind()),\n })\n })\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vec.into())\n}\n\nfn encode_multi_dense_vector(v: &BasicValue) -> Result {\n let vecs = match v {\n BasicValue::Vector(v) => v\n .iter()\n .map(encode_dense_vector)\n .collect::>>()?,\n _ => bail!(\"Expected a vector field, got {:?}\", v),\n };\n Ok(vecs.into())\n}\n\nfn embedding_metric_to_qdrant(metric: spec::VectorSimilarityMetric) -> Result {\n Ok(match metric {\n spec::VectorSimilarityMetric::CosineSimilarity => Distance::Cosine,\n spec::VectorSimilarityMetric::L2Distance => Distance::Euclid,\n spec::VectorSimilarityMetric::InnerProduct => Distance::Dot,\n })\n}\n\n////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionKey {\n connection: Option>,\n collection_name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]\nstruct 
VectorDef {\n vector_size: usize,\n metric: spec::VectorSimilarityMetric,\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n multi_vector_comparator: Option,\n}\n#[derive(Debug, Clone, Serialize, Deserialize)]\nstruct SetupState {\n #[serde(default)]\n vectors: BTreeMap,\n\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n unsupported_vector_fields: Vec<(String, ValueType)>,\n}\n\n#[derive(Debug)]\nstruct SetupStatus {\n delete_collection: bool,\n add_collection: Option,\n}\n\nimpl setup::ResourceSetupStatus for SetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut result = vec![];\n if self.delete_collection {\n result.push(setup::ChangeDescription::Action(\n \"Delete collection\".to_string(),\n ));\n }\n if let Some(add_collection) = &self.add_collection {\n let vector_descriptions = add_collection\n .vectors\n .iter()\n .map(|(name, vector_def)| {\n format!(\n \"{}[{}], {}\",\n name, vector_def.vector_size, vector_def.metric\n )\n })\n .collect::>()\n .join(\"; \");\n result.push(setup::ChangeDescription::Action(format!(\n \"Create collection{}\",\n if vector_descriptions.is_empty() {\n \"\".to_string()\n } else {\n format!(\" with vectors: {vector_descriptions}\")\n }\n )));\n for (name, schema) in add_collection.unsupported_vector_fields.iter() {\n result.push(setup::ChangeDescription::Note(format!(\n \"Field `{}` has type `{}`. Only number vector with fixed size is supported by Qdrant. 
It will be stored in payload.\",\n name, schema\n )));\n }\n }\n result\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n match (self.delete_collection, self.add_collection.is_some()) {\n (false, false) => setup::SetupChangeType::NoChange,\n (false, true) => setup::SetupChangeType::Create,\n (true, false) => setup::SetupChangeType::Delete,\n (true, true) => setup::SetupChangeType::Update,\n }\n }\n}\n\nimpl SetupStatus {\n async fn apply_delete(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if self.delete_collection {\n qdrant_client.delete_collection(collection_name).await?;\n }\n Ok(())\n }\n\n async fn apply_create(&self, collection_name: &String, qdrant_client: &Qdrant) -> Result<()> {\n if let Some(add_collection) = &self.add_collection {\n let mut builder = CreateCollectionBuilder::new(collection_name);\n if !add_collection.vectors.is_empty() {\n let mut vectors_config = VectorsConfigBuilder::default();\n for (name, vector_def) in add_collection.vectors.iter() {\n let mut params = VectorParamsBuilder::new(\n vector_def.vector_size as u64,\n embedding_metric_to_qdrant(vector_def.metric)?,\n );\n if let Some(multi_vector_comparator) = &vector_def.multi_vector_comparator {\n params = params.multivector_config(MultiVectorConfigBuilder::new(\n MultiVectorComparator::from_str_name(multi_vector_comparator)\n .ok_or_else(|| {\n anyhow!(\n \"unrecognized multi vector comparator: {}\",\n multi_vector_comparator\n )\n })?,\n ));\n }\n vectors_config.add_named_vector_params(name, params);\n }\n builder = builder.vectors_config(vectors_config);\n }\n qdrant_client.create_collection(builder).await?;\n }\n Ok(())\n }\n}\n\n////////////////////////////////////////////////////////////\n// Deal with mutations\n////////////////////////////////////////////////////////////\n\nstruct ExportContext {\n qdrant_client: Arc,\n collection_name: String,\n fields_info: Vec,\n}\n\nimpl ExportContext {\n async fn apply_mutation(&self, mutation: 
ExportTargetMutation) -> Result<()> {\n let mut points: Vec = Vec::with_capacity(mutation.upserts.len());\n for upsert in mutation.upserts.iter() {\n let point_id = key_to_point_id(&upsert.key)?;\n let (payload, vectors) = values_to_payload(&upsert.value.fields, &self.fields_info)?;\n\n points.push(PointStruct::new(point_id, vectors, payload));\n }\n\n if !points.is_empty() {\n self.qdrant_client\n .upsert_points(UpsertPointsBuilder::new(&self.collection_name, points).wait(true))\n .await?;\n }\n\n let ids = mutation\n .deletes\n .iter()\n .map(|deletion| key_to_point_id(&deletion.key))\n .collect::>>()?;\n\n if !ids.is_empty() {\n self.qdrant_client\n .delete_points(\n DeletePointsBuilder::new(&self.collection_name)\n .points(PointsIdsList { ids })\n .wait(true),\n )\n .await?;\n }\n\n Ok(())\n }\n}\nfn key_to_point_id(key_value: &KeyValue) -> Result {\n let point_id = match key_value {\n KeyValue::Str(v) => PointId::from(v.to_string()),\n KeyValue::Int64(v) => PointId::from(*v as u64),\n KeyValue::Uuid(v) => PointId::from(v.to_string()),\n e => bail!(\"Invalid Qdrant point ID: {e}\"),\n };\n\n Ok(point_id)\n}\n\nfn values_to_payload(\n value_fields: &[Value],\n fields_info: &[FieldInfo],\n) -> Result<(HashMap, NamedVectors)> {\n let mut payload = HashMap::with_capacity(value_fields.len());\n let mut vectors = NamedVectors::default();\n\n for (value, field_info) in value_fields.iter().zip(fields_info.iter()) {\n let field_name = &field_info.field_schema.name;\n\n match &field_info.vector_shape {\n Some(vector_shape) => {\n if value.is_null() {\n continue;\n }\n let vector: QdrantVector = match value {\n Value::Basic(basic_value) => match vector_shape {\n VectorShape::Vector(_) => encode_dense_vector(&basic_value)?.into(),\n VectorShape::MultiVector(_) => {\n encode_multi_dense_vector(&basic_value)?.into()\n }\n },\n _ => {\n bail!(\"Expected a vector field, got {:?}\", value);\n }\n };\n vectors = vectors.add_vector(field_name.clone(), vector);\n }\n None => {\n 
let json_value = serde_json::to_value(TypedValue {\n t: &field_info.field_schema.value_type.typ,\n v: value,\n })?;\n payload.insert(field_name.clone(), json_value.into());\n }\n }\n }\n\n Ok((payload, vectors))\n}\n\n////////////////////////////////////////////////////////////\n// Factory implementation\n////////////////////////////////////////////////////////////\n\n#[derive(Default)]\nstruct Factory {\n qdrant_clients: Mutex>, Arc>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\nstruct CollectionId {\n collection_name: String,\n}\n\nimpl Display for CollectionId {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.collection_name)?;\n Ok(())\n }\n}\n\n#[async_trait]\nimpl StorageFactoryBase for Factory {\n type Spec = Spec;\n type DeclarationSpec = ();\n type SetupState = SetupState;\n type SetupStatus = SetupStatus;\n type Key = CollectionKey;\n type ExportContext = ExportContext;\n\n fn name(&self) -> &str {\n \"Qdrant\"\n }\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n _declarations: Vec<()>,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(CollectionKey, SetupState)>,\n )> {\n let data_coll_output = data_collections\n .into_iter()\n .map(|d| {\n if d.key_fields_schema.len() != 1 {\n api_bail!(\n \"Expected one primary key field for the point ID. 
Got {}.\",\n d.key_fields_schema.len()\n )\n }\n\n let mut fields_info = Vec::::new();\n let mut vector_def = BTreeMap::::new();\n let mut unsupported_vector_fields = Vec::<(String, ValueType)>::new();\n\n for field in d.value_fields_schema.iter() {\n let vector_shape = parse_vector_shape(&field.value_type.typ);\n if let Some(vector_shape) = &vector_shape {\n vector_def.insert(\n field.name.clone(),\n VectorDef {\n vector_size: vector_shape.vector_size(),\n metric: DEFAULT_VECTOR_SIMILARITY_METRIC,\n multi_vector_comparator: vector_shape.multi_vector_comparator().map(|s| s.as_str_name().to_string()),\n },\n );\n } else if matches!(\n &field.value_type.typ,\n schema::ValueType::Basic(schema::BasicValueType::Vector(_))\n ) {\n // This is a vector field but not supported by Qdrant\n unsupported_vector_fields.push((field.name.clone(), field.value_type.typ.clone()));\n }\n fields_info.push(FieldInfo {\n field_schema: field.clone(),\n vector_shape,\n });\n }\n\n let mut specified_vector_fields = HashSet::new();\n for vector_index in d.index_options.vector_indexes {\n match vector_def.get_mut(&vector_index.field_name) {\n Some(vector_def) => {\n if specified_vector_fields.insert(vector_index.field_name.clone()) {\n // Validate the metric is supported by Qdrant\n embedding_metric_to_qdrant(vector_index.metric)\n .with_context(||\n format!(\"Parsing vector index metric {} for field `{}`\", vector_index.metric, vector_index.field_name))?;\n vector_def.metric = vector_index.metric;\n } else {\n api_bail!(\"Field `{}` specified more than once in vector index definition\", vector_index.field_name);\n }\n }\n None => {\n if let Some(field) = d.value_fields_schema.iter().find(|f| f.name == vector_index.field_name) {\n api_bail!(\n \"Field `{}` specified in vector index is expected to be a number vector with fixed size, actual type: {}\",\n vector_index.field_name, field.value_type.typ\n );\n } else {\n api_bail!(\"Field `{}` specified in vector index is not found\", 
vector_index.field_name);\n }\n }\n }\n }\n\n let export_context = Arc::new(ExportContext {\n qdrant_client: self\n .get_qdrant_client(&d.spec.connection, &context.auth_registry)?,\n collection_name: d.spec.collection_name.clone(),\n fields_info,\n });\n Ok(TypedExportDataCollectionBuildOutput {\n export_context: Box::pin(async move { Ok(export_context) }),\n setup_key: CollectionKey {\n connection: d.spec.connection,\n collection_name: d.spec.collection_name,\n },\n desired_setup_state: SetupState {\n vectors: vector_def,\n unsupported_vector_fields,\n },\n })\n })\n .collect::>>()?;\n Ok((data_coll_output, vec![]))\n }\n\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(match key {\n serde_json::Value::String(s) => {\n // For backward compatibility.\n CollectionKey {\n collection_name: s,\n connection: None,\n }\n }\n _ => serde_json::from_value(key)?,\n })\n }\n\n async fn check_setup_status(\n &self,\n _key: CollectionKey,\n desired: Option,\n existing: setup::CombinedState,\n _flow_instance_ctx: Arc,\n ) -> Result {\n let desired_exists = desired.is_some();\n let add_collection = desired.filter(|state| {\n !existing.always_exists()\n || existing\n .possible_versions()\n .any(|v| v.vectors != state.vectors)\n });\n let delete_collection = existing.possible_versions().next().is_some()\n && (!desired_exists || add_collection.is_some());\n Ok(SetupStatus {\n delete_collection,\n add_collection,\n })\n }\n\n fn check_state_compatibility(\n &self,\n desired: &SetupState,\n existing: &SetupState,\n ) -> Result {\n Ok(if desired.vectors == existing.vectors {\n SetupStateCompatibility::Compatible\n } else {\n SetupStateCompatibility::NotCompatible\n })\n }\n\n fn describe_resource(&self, key: &CollectionKey) -> Result {\n Ok(format!(\n \"Qdrant collection {}{}\",\n key.collection_name,\n key.connection\n .as_ref()\n .map_or_else(|| \"\".to_string(), |auth_entry| format!(\" @ {auth_entry}\"))\n ))\n }\n\n async fn apply_mutation(\n &self,\n mutations: 
Vec>,\n ) -> Result<()> {\n for mutation_w_ctx in mutations.into_iter() {\n mutation_w_ctx\n .export_context\n .apply_mutation(mutation_w_ctx.mutation)\n .await?;\n }\n Ok(())\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_delete(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n for setup_change in setup_status.iter() {\n let qdrant_client =\n self.get_qdrant_client(&setup_change.key.connection, &context.auth_registry)?;\n setup_change\n .setup_status\n .apply_create(&setup_change.key.collection_name, &qdrant_client)\n .await?;\n }\n Ok(())\n }\n}\n\nimpl Factory {\n fn new() -> Self {\n Self {\n qdrant_clients: Mutex::new(HashMap::new()),\n }\n }\n\n fn get_qdrant_client(\n &self,\n auth_entry: &Option>,\n auth_registry: &AuthRegistry,\n ) -> Result> {\n let mut clients = self.qdrant_clients.lock().unwrap();\n if let Some(client) = clients.get(auth_entry) {\n return Ok(client.clone());\n }\n\n let spec = auth_entry.as_ref().map_or_else(\n || {\n Ok(ConnectionSpec {\n grpc_url: DEFAULT_URL.to_string(),\n api_key: None,\n })\n },\n |auth_entry| auth_registry.get(auth_entry),\n )?;\n let client = Arc::new(\n Qdrant::from_url(&spec.grpc_url)\n .api_key(spec.api_key)\n .skip_compatibility_check()\n .build()?,\n );\n clients.insert(auth_entry.clone(), client.clone());\n Ok(client)\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory::new().register(registry)\n}\n"], ["/cocoindex/src/execution/db_tracking_setup.rs", "use crate::prelude::*;\n\nuse crate::setup::{CombinedState, ResourceSetupInfo, ResourceSetupStatus, SetupChangeType};\nuse serde::{Deserialize, Serialize};\nuse sqlx::PgPool;\n\npub fn default_tracking_table_name(flow_name: &str) -> String {\n format!(\n 
\"{}__cocoindex_tracking\",\n utils::db::sanitize_identifier(flow_name)\n )\n}\n\npub const CURRENT_TRACKING_TABLE_VERSION: i32 = 1;\n\nasync fn upgrade_tracking_table(\n pool: &PgPool,\n table_name: &str,\n existing_version_id: i32,\n target_version_id: i32,\n) -> Result<()> {\n if existing_version_id < 1 && target_version_id >= 1 {\n let query = format!(\n \"CREATE TABLE IF NOT EXISTS {table_name} (\n source_id INTEGER NOT NULL,\n source_key JSONB NOT NULL,\n\n -- Update in the precommit phase: after evaluation done, before really applying the changes to the target storage.\n max_process_ordinal BIGINT NOT NULL,\n staging_target_keys JSONB NOT NULL,\n memoization_info JSONB,\n\n -- Update after applying the changes to the target storage.\n processed_source_ordinal BIGINT,\n process_logic_fingerprint BYTEA,\n process_ordinal BIGINT,\n process_time_micros BIGINT,\n target_keys JSONB,\n\n PRIMARY KEY (source_id, source_key)\n );\",\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n Ok(())\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TrackingTableSetupState {\n pub table_name: String,\n pub version_id: i32,\n}\n\n#[derive(Debug)]\npub struct TrackingTableSetupStatus {\n pub desired_state: Option,\n\n pub legacy_table_names: Vec,\n\n pub min_existing_version_id: Option,\n pub source_ids_to_delete: Vec,\n}\n\nimpl TrackingTableSetupStatus {\n pub fn new(\n desired: Option<&TrackingTableSetupState>,\n existing: &CombinedState,\n source_ids_to_delete: Vec,\n ) -> Option {\n let legacy_table_names = existing\n .legacy_values(desired, |v| &v.table_name)\n .into_iter()\n .cloned()\n .collect();\n let min_existing_version_id = existing\n .always_exists()\n .then(|| existing.possible_versions().map(|v| v.version_id).min())\n .flatten();\n if desired.is_some() || min_existing_version_id.is_some() {\n Some(Self {\n desired_state: desired.cloned(),\n legacy_table_names,\n min_existing_version_id,\n source_ids_to_delete,\n })\n } else {\n 
None\n }\n }\n\n pub fn into_setup_info(\n self,\n ) -> ResourceSetupInfo<(), TrackingTableSetupState, TrackingTableSetupStatus> {\n ResourceSetupInfo {\n key: (),\n state: self.desired_state.clone(),\n description: \"Tracking Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for TrackingTableSetupStatus {\n fn describe_changes(&self) -> Vec {\n let mut changes: Vec = vec![];\n if self.desired_state.is_some() && !self.legacy_table_names.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Rename legacy tracking tables: {}. \",\n self.legacy_table_names.join(\", \")\n )));\n }\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(state)) => {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Create the tracking table: {}. \",\n state.table_name\n )));\n }\n (Some(min_version_id), Some(desired)) => {\n if min_version_id < desired.version_id {\n changes.push(setup::ChangeDescription::Action(\n \"Update the tracking table. \".into(),\n ));\n }\n }\n (Some(_), None) => changes.push(setup::ChangeDescription::Action(format!(\n \"Drop existing tracking table: {}. \",\n self.legacy_table_names.join(\", \")\n ))),\n (None, None) => (),\n }\n if !self.source_ids_to_delete.is_empty() {\n changes.push(setup::ChangeDescription::Action(format!(\n \"Delete source IDs: {}. 
\",\n self.source_ids_to_delete\n .iter()\n .map(|id| id.to_string())\n .collect::>()\n .join(\", \")\n )));\n }\n changes\n }\n\n fn change_type(&self) -> SetupChangeType {\n match (self.min_existing_version_id, &self.desired_state) {\n (None, Some(_)) => SetupChangeType::Create,\n (Some(min_version_id), Some(desired)) => {\n if min_version_id == desired.version_id && self.legacy_table_names.is_empty() {\n SetupChangeType::NoChange\n } else if min_version_id < desired.version_id {\n SetupChangeType::Update\n } else {\n SetupChangeType::Invalid\n }\n }\n (Some(_), None) => SetupChangeType::Delete,\n (None, None) => SetupChangeType::NoChange,\n }\n }\n}\n\nimpl TrackingTableSetupStatus {\n pub async fn apply_change(&self) -> Result<()> {\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n if let Some(desired) = &self.desired_state {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\n \"ALTER TABLE IF EXISTS {} RENAME TO {}\",\n lagacy_name, desired.table_name\n );\n sqlx::query(&query).execute(pool).await?;\n }\n\n if self.min_existing_version_id != Some(desired.version_id) {\n upgrade_tracking_table(\n pool,\n &desired.table_name,\n self.min_existing_version_id.unwrap_or(0),\n desired.version_id,\n )\n .await?;\n }\n } else {\n for lagacy_name in self.legacy_table_names.iter() {\n let query = format!(\"DROP TABLE IF EXISTS {lagacy_name}\");\n sqlx::query(&query).execute(pool).await?;\n }\n return Ok(());\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/setup/states.rs", "/// Concepts:\n/// - Resource: some setup that needs to be tracked and maintained.\n/// - Setup State: current state of a resource.\n/// - Staging Change: states changes that may not be really applied yet.\n/// - Combined Setup State: Setup State + Staging Change.\n/// - Status Check: information about changes that are being applied / need to be applied.\n///\n/// Resource hierarchy:\n/// - [resource: setup metadata table] /// - 
Flow\n/// - [resource: metadata]\n/// - [resource: tracking table]\n/// - Target\n/// - [resource: target-specific stuff]\nuse crate::prelude::*;\n\nuse indenter::indented;\nuse owo_colors::{AnsiColors, OwoColorize};\nuse std::any::Any;\nuse std::fmt::Debug;\nuse std::fmt::{Display, Write};\nuse std::hash::Hash;\n\nuse super::db_metadata;\nuse crate::execution::db_tracking_setup::{\n self, TrackingTableSetupState, TrackingTableSetupStatus,\n};\n\nconst INDENT: &str = \" \";\n\npub trait StateMode: Clone + Copy {\n type State: Debug + Clone;\n type DefaultState: Debug + Clone + Default;\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct DesiredMode;\nimpl StateMode for DesiredMode {\n type State = T;\n type DefaultState = T;\n}\n\n#[derive(Debug, Clone)]\npub struct CombinedState {\n pub current: Option,\n pub staging: Vec>,\n /// Legacy state keys that no longer identical to the latest serialized form (usually caused by code change).\n /// They will be deleted when the next change is applied.\n pub legacy_state_key: Option,\n}\n\nimpl CombinedState {\n pub fn from_desired(desired: T) -> Self {\n Self {\n current: Some(desired),\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn from_change(prev: Option>, change: Option>) -> Self\n where\n T: Clone,\n {\n Self {\n current: match change {\n Some(Some(state)) => Some(state.clone()),\n Some(None) => None,\n None => prev.and_then(|v| v.current),\n },\n staging: vec![],\n legacy_state_key: None,\n }\n }\n\n pub fn possible_versions(&self) -> impl Iterator {\n self.current\n .iter()\n .chain(self.staging.iter().flat_map(|s| s.state().into_iter()))\n }\n\n pub fn always_exists(&self) -> bool {\n self.current.is_some() && self.staging.iter().all(|s| !s.is_delete())\n }\n\n pub fn legacy_values &V>(\n &self,\n desired: Option<&T>,\n f: F,\n ) -> BTreeSet<&V> {\n let desired_value = desired.map(&f);\n self.possible_versions()\n .map(f)\n .filter(|v| Some(*v) != desired_value)\n .collect()\n }\n}\n\nimpl Default for 
CombinedState {\n fn default() -> Self {\n Self {\n current: None,\n staging: vec![],\n legacy_state_key: None,\n }\n }\n}\n\nimpl PartialEq for CombinedState {\n fn eq(&self, other: &T) -> bool {\n self.staging.is_empty() && self.current.as_ref() == Some(other)\n }\n}\n\n#[derive(Clone, Copy)]\npub struct ExistingMode;\nimpl StateMode for ExistingMode {\n type State = CombinedState;\n type DefaultState = CombinedState;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub enum StateChange {\n Upsert(State),\n Delete,\n}\n\nimpl StateChange {\n pub fn is_delete(&self) -> bool {\n matches!(self, StateChange::Delete)\n }\n\n pub fn desired_state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n\n pub fn state(&self) -> Option<&State> {\n match self {\n StateChange::Upsert(state) => Some(state),\n StateChange::Delete => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct SourceSetupState {\n pub source_id: i32,\n pub key_schema: schema::ValueType,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct ResourceIdentifier {\n pub key: serde_json::Value,\n pub target_kind: String,\n}\n\nimpl Display for ResourceIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}:{}\", self.target_kind, self.key)\n }\n}\n\n/// Common state (i.e. 
not specific to a target kind) for a target.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupStateCommon {\n pub target_id: i32,\n pub schema_version_id: i32,\n pub max_schema_version_id: i32,\n #[serde(default)]\n pub setup_by_user: bool,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct TargetSetupState {\n pub common: TargetSetupStateCommon,\n\n pub state: serde_json::Value,\n}\n\nimpl TargetSetupState {\n pub fn state_unless_setup_by_user(self) -> Option {\n (!self.common.setup_by_user).then_some(self.state)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]\npub struct FlowSetupMetadata {\n pub last_source_id: i32,\n pub last_target_id: i32,\n pub sources: BTreeMap,\n}\n\n#[derive(Debug, Clone)]\npub struct FlowSetupState {\n // The version number for the flow, last seen in the metadata table.\n pub seen_flow_metadata_version: Option,\n pub metadata: Mode::DefaultState,\n pub tracking_table: Mode::State,\n pub targets: IndexMap>,\n}\n\nimpl Default for FlowSetupState {\n fn default() -> Self {\n Self {\n seen_flow_metadata_version: None,\n metadata: Default::default(),\n tracking_table: Default::default(),\n targets: IndexMap::new(),\n }\n }\n}\n\nimpl PartialEq for FlowSetupState {\n fn eq(&self, other: &Self) -> bool {\n self.metadata == other.metadata\n && self.tracking_table == other.tracking_table\n && self.targets == other.targets\n }\n}\n\n#[derive(Debug, Clone)]\npub struct AllSetupStates {\n pub has_metadata_table: bool,\n pub flows: BTreeMap>,\n}\n\nimpl Default for AllSetupStates {\n fn default() -> Self {\n Self {\n has_metadata_table: false,\n flows: BTreeMap::new(),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum SetupChangeType {\n NoChange,\n Create,\n Update,\n Delete,\n Invalid,\n}\n\npub enum ChangeDescription {\n Action(String),\n Note(String),\n}\n\npub trait ResourceSetupStatus: Send + Sync + Debug + Any + 
'static {\n fn describe_changes(&self) -> Vec;\n\n fn change_type(&self) -> SetupChangeType;\n}\n\nimpl ResourceSetupStatus for Box {\n fn describe_changes(&self) -> Vec {\n self.as_ref().describe_changes()\n }\n\n fn change_type(&self) -> SetupChangeType {\n self.as_ref().change_type()\n }\n}\n\nimpl ResourceSetupStatus for std::convert::Infallible {\n fn describe_changes(&self) -> Vec {\n unreachable!()\n }\n\n fn change_type(&self) -> SetupChangeType {\n unreachable!()\n }\n}\n\n#[derive(Debug)]\npub struct ResourceSetupInfo {\n pub key: K,\n pub state: Option,\n pub description: String,\n\n /// If `None`, the resource is managed by users.\n pub setup_status: Option,\n\n pub legacy_key: Option,\n}\n\nimpl std::fmt::Display for ResourceSetupInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let status_code = match self.setup_status.as_ref().map(|c| c.change_type()) {\n Some(SetupChangeType::NoChange) => \"READY\",\n Some(SetupChangeType::Create) => \"TO CREATE\",\n Some(SetupChangeType::Update) => \"TO UPDATE\",\n Some(SetupChangeType::Delete) => \"TO DELETE\",\n Some(SetupChangeType::Invalid) => \"INVALID\",\n None => \"USER MANAGED\",\n };\n let status_str = format!(\"[ {status_code:^9} ]\");\n let status_full = status_str.color(AnsiColors::Cyan);\n let desc_colored = &self.description;\n writeln!(f, \"{status_full} {desc_colored}\")?;\n if let Some(setup_status) = &self.setup_status {\n let changes = setup_status.describe_changes();\n if !changes.is_empty() {\n let mut f = indented(f).with_str(INDENT);\n writeln!(f, \"\")?;\n for change in changes {\n match change {\n ChangeDescription::Action(action) => {\n writeln!(\n f,\n \"{} {}\",\n \"TODO:\".color(AnsiColors::BrightBlack).bold(),\n action.color(AnsiColors::BrightBlack)\n )?;\n }\n ChangeDescription::Note(note) => {\n writeln!(\n f,\n \"{} {}\",\n \"NOTE:\".color(AnsiColors::Yellow).bold(),\n note.color(AnsiColors::Yellow)\n )?;\n }\n }\n }\n writeln!(f)?;\n }\n }\n Ok(())\n 
}\n}\n\nimpl ResourceSetupInfo {\n pub fn is_up_to_date(&self) -> bool {\n self.setup_status\n .as_ref()\n .is_none_or(|c| c.change_type() == SetupChangeType::NoChange)\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum ObjectStatus {\n Invalid,\n New,\n Existing,\n Deleted,\n}\n\npub trait ObjectSetupStatus {\n fn status(&self) -> Option;\n fn is_up_to_date(&self) -> bool;\n}\n\n#[derive(Debug)]\npub struct FlowSetupStatus {\n pub status: Option,\n pub seen_flow_metadata_version: Option,\n\n pub metadata_change: Option>,\n\n pub tracking_table:\n Option>,\n pub target_resources:\n Vec>>,\n\n pub unknown_resources: Vec,\n}\n\nimpl ObjectSetupStatus for FlowSetupStatus {\n fn status(&self) -> Option {\n self.status\n }\n\n fn is_up_to_date(&self) -> bool {\n self.metadata_change.is_none()\n && self\n .tracking_table\n .as_ref()\n .is_none_or(|t| t.is_up_to_date())\n && self\n .target_resources\n .iter()\n .all(|target| target.is_up_to_date())\n }\n}\n\n#[derive(Debug)]\npub struct GlobalSetupStatus {\n pub metadata_table: ResourceSetupInfo<(), (), db_metadata::MetadataTableSetup>,\n}\n\nimpl GlobalSetupStatus {\n pub fn from_setup_states(setup_states: &AllSetupStates) -> Self {\n Self {\n metadata_table: db_metadata::MetadataTableSetup {\n metadata_table_missing: !setup_states.has_metadata_table,\n }\n .into_setup_info(),\n }\n }\n\n pub fn is_up_to_date(&self) -> bool {\n self.metadata_table.is_up_to_date()\n }\n}\n\npub struct ObjectSetupStatusCode<'a, Status: ObjectSetupStatus>(&'a Status);\nimpl std::fmt::Display for ObjectSetupStatusCode<'_, Status> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let Some(status) = self.0.status() else {\n return Ok(());\n };\n write!(\n f,\n \"[ {:^9} ]\",\n match status {\n ObjectStatus::New => \"TO CREATE\",\n ObjectStatus::Existing =>\n if self.0.is_up_to_date() {\n \"READY\"\n } else {\n \"TO UPDATE\"\n },\n ObjectStatus::Deleted => \"TO DELETE\",\n 
ObjectStatus::Invalid => \"INVALID\",\n }\n )\n }\n}\n\nimpl std::fmt::Display for GlobalSetupStatus {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n writeln!(f, \"{}\", self.metadata_table)\n }\n}\n\npub struct FormattedFlowSetupStatus<'a>(pub &'a str, pub &'a FlowSetupStatus);\n\nimpl std::fmt::Display for FormattedFlowSetupStatus<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let flow_ssc = self.1;\n if flow_ssc.status.is_none() {\n return Ok(());\n }\n\n writeln!(\n f,\n \"{} Flow: {}\",\n ObjectSetupStatusCode(flow_ssc)\n .to_string()\n .color(AnsiColors::Cyan),\n self.0\n )?;\n\n let mut f = indented(f).with_str(INDENT);\n if let Some(tracking_table) = &flow_ssc.tracking_table {\n write!(f, \"{tracking_table}\")?;\n }\n for target_resource in &flow_ssc.target_resources {\n write!(f, \"{target_resource}\")?;\n }\n for resource in &flow_ssc.unknown_resources {\n writeln!(f, \"[ UNKNOWN ] {resource}\")?;\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/py_factory.rs", "use crate::prelude::*;\n\nuse pyo3::{\n IntoPyObjectExt, Py, PyAny, Python, pyclass, pymethods,\n types::{IntoPyDict, PyList, PyString, PyTuple},\n};\nuse pythonize::{depythonize, pythonize};\n\nuse crate::{\n base::{schema, value},\n builder::plan,\n ops::sdk::SetupStateCompatibility,\n py::{self, ToResultWithPyTrace},\n};\nuse anyhow::{Result, anyhow};\n\n#[pyclass(name = \"OpArgSchema\")]\npub struct PyOpArgSchema {\n value_type: crate::py::Pythonized,\n analyzed_value: crate::py::Pythonized,\n}\n\n#[pymethods]\nimpl PyOpArgSchema {\n #[getter]\n fn value_type(&self) -> &crate::py::Pythonized {\n &self.value_type\n }\n\n #[getter]\n fn analyzed_value(&self) -> &crate::py::Pythonized {\n &self.analyzed_value\n }\n}\n\nstruct PyFunctionExecutor {\n py_function_executor: Py,\n py_exec_ctx: Arc,\n\n num_positional_args: usize,\n kw_args_names: Vec>,\n result_type: schema::EnrichedValueType,\n\n enable_cache: bool,\n behavior_version: 
Option,\n}\n\nimpl PyFunctionExecutor {\n fn call_py_fn<'py>(\n &self,\n py: Python<'py>,\n input: Vec,\n ) -> Result> {\n let mut args = Vec::with_capacity(self.num_positional_args);\n for v in input[0..self.num_positional_args].iter() {\n args.push(py::value_to_py_object(py, v)?);\n }\n\n let kwargs = if self.kw_args_names.is_empty() {\n None\n } else {\n let mut kwargs = Vec::with_capacity(self.kw_args_names.len());\n for (name, v) in self\n .kw_args_names\n .iter()\n .zip(input[self.num_positional_args..].iter())\n {\n kwargs.push((name.bind(py), py::value_to_py_object(py, v)?));\n }\n Some(kwargs)\n };\n\n let result = self\n .py_function_executor\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n kwargs\n .map(|kwargs| -> Result<_> { Ok(kwargs.into_py_dict(py)?) })\n .transpose()?\n .as_ref(),\n )\n .to_result_with_py_trace(py)?;\n Ok(result.into_bound(py))\n }\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionExecutor for Arc {\n async fn evaluate(&self, input: Vec) -> Result {\n let self = self.clone();\n let result_fut = Python::with_gil(|py| -> Result<_> {\n let result_coro = self.call_py_fn(py, input)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(self.py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro,\n )?)\n })?;\n let result = result_fut.await;\n Python::with_gil(|py| -> Result<_> {\n let result = result.to_result_with_py_trace(py)?;\n Ok(py::value_from_py_object(\n &self.result_type.typ,\n &result.into_bound(py),\n )?)\n })\n }\n\n fn enable_cache(&self) -> bool {\n self.enable_cache\n }\n\n fn behavior_version(&self) -> Option {\n self.behavior_version\n }\n}\n\npub(crate) struct PyFunctionFactory {\n pub py_function_factory: Py,\n}\n\n#[async_trait]\nimpl interface::SimpleFunctionFactory for PyFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n schema::EnrichedValueType,\n 
BoxFuture<'static, Result>>,\n )> {\n let (result_type, executor, kw_args_names, num_positional_args) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let mut args = vec![pythonize(py, &spec)?];\n let mut kwargs = vec![];\n let mut num_positional_args = 0;\n for arg in input_schema.into_iter() {\n let py_arg_schema = PyOpArgSchema {\n value_type: crate::py::Pythonized(arg.value_type.clone()),\n analyzed_value: crate::py::Pythonized(arg.analyzed_value.clone()),\n };\n match arg.name.0 {\n Some(name) => {\n kwargs.push((name.clone(), py_arg_schema));\n }\n None => {\n args.push(py_arg_schema.into_bound_py_any(py)?);\n num_positional_args += 1;\n }\n }\n }\n\n let kw_args_names = kwargs\n .iter()\n .map(|(name, _)| PyString::new(py, name).unbind())\n .collect::>();\n let result = self\n .py_function_factory\n .call(\n py,\n PyTuple::new(py, args.into_iter())?,\n Some(&kwargs.into_py_dict(py)?),\n )\n .to_result_with_py_trace(py)?;\n let (result_type, executor) = result\n .extract::<(crate::py::Pythonized, Py)>(py)?;\n Ok((\n result_type.into_inner(),\n executor,\n kw_args_names,\n num_positional_args,\n ))\n })?;\n\n let executor_fut = {\n let result_type = result_type.clone();\n async move {\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let (prepare_fut, enable_cache, behavior_version) =\n Python::with_gil(|py| -> anyhow::Result<_> {\n let prepare_coro = executor\n .call_method(py, \"prepare\", (), None)\n .to_result_with_py_trace(py)?;\n let prepare_fut = pyo3_async_runtimes::into_future_with_locals(\n &pyo3_async_runtimes::TaskLocals::new(\n py_exec_ctx.event_loop.bind(py).clone(),\n ),\n prepare_coro.into_bound(py),\n )?;\n let enable_cache = executor\n .call_method(py, \"enable_cache\", (), None)\n .to_result_with_py_trace(py)?\n .extract::(py)?;\n let behavior_version = executor\n .call_method(py, \"behavior_version\", (), None)\n .to_result_with_py_trace(py)?\n 
.extract::>(py)?;\n Ok((prepare_fut, enable_cache, behavior_version))\n })?;\n prepare_fut.await?;\n Ok(Box::new(Arc::new(PyFunctionExecutor {\n py_function_executor: executor,\n py_exec_ctx,\n num_positional_args,\n kw_args_names,\n result_type,\n enable_cache,\n behavior_version,\n }))\n as Box)\n }\n };\n\n Ok((result_type, executor_fut.boxed()))\n }\n}\n\npub(crate) struct PyExportTargetFactory {\n pub py_target_connector: Py,\n}\n\nstruct PyTargetExecutorContext {\n py_export_ctx: Py,\n py_exec_ctx: Arc,\n}\n\n#[derive(Debug)]\nstruct PyTargetResourceSetupStatus {\n stale_existing_states: IndexSet>,\n desired_state: Option,\n}\n\nimpl setup::ResourceSetupStatus for PyTargetResourceSetupStatus {\n fn describe_changes(&self) -> Vec {\n vec![]\n }\n\n fn change_type(&self) -> setup::SetupChangeType {\n if self.stale_existing_states.is_empty() {\n setup::SetupChangeType::NoChange\n } else if self.desired_state.is_some() {\n if self\n .stale_existing_states\n .iter()\n .any(|state| state.is_none())\n {\n setup::SetupChangeType::Create\n } else {\n setup::SetupChangeType::Update\n }\n } else {\n setup::SetupChangeType::Delete\n }\n }\n}\n\n#[async_trait]\nimpl interface::ExportTargetFactory for PyExportTargetFactory {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n if declarations.len() != 0 {\n api_error!(\"Custom target connector doesn't support declarations yet\");\n }\n\n let mut build_outputs = Vec::with_capacity(data_collections.len());\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n for data_collection in data_collections.into_iter() {\n let (py_export_ctx, persistent_key) =\n Python::with_gil(|py| -> Result<(Py, serde_json::Value)> {\n // Deserialize the spec to Python object.\n let py_export_ctx = self\n .py_target_connector\n .call_method(\n py,\n 
\"create_export_context\",\n (\n &data_collection.name,\n pythonize(py, &data_collection.spec)?,\n pythonize(py, &data_collection.key_fields_schema)?,\n pythonize(py, &data_collection.value_fields_schema)?,\n ),\n None,\n )\n .to_result_with_py_trace(py)?;\n\n // Call the `get_persistent_key` method to get the persistent key.\n let persistent_key = self\n .py_target_connector\n .call_method(py, \"get_persistent_key\", (&py_export_ctx,), None)\n .to_result_with_py_trace(py)?;\n let persistent_key = depythonize(&persistent_key.into_bound(py))?;\n Ok((py_export_ctx, persistent_key))\n })?;\n\n let py_exec_ctx = py_exec_ctx.clone();\n let build_output = interface::ExportDataCollectionBuildOutput {\n export_context: Box::pin(async move {\n Ok(Arc::new(PyTargetExecutorContext {\n py_export_ctx,\n py_exec_ctx,\n }) as Arc)\n }),\n setup_key: persistent_key,\n desired_setup_state: data_collection.spec,\n };\n build_outputs.push(build_output);\n }\n Ok((build_outputs, vec![]))\n }\n\n async fn check_setup_status(\n &self,\n _key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n _context: Arc,\n ) -> Result> {\n // Collect all possible existing states that are not the desired state.\n let mut stale_existing_states = IndexSet::new();\n if !existing_states.always_exists() && desired_state.is_some() {\n stale_existing_states.insert(None);\n }\n for possible_state in existing_states.possible_versions() {\n if Some(possible_state) != desired_state.as_ref() {\n stale_existing_states.insert(Some(possible_state.clone()));\n }\n }\n\n Ok(Box::new(PyTargetResourceSetupStatus {\n stale_existing_states,\n desired_state,\n }))\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n Ok(key.clone())\n }\n\n fn check_state_compatibility(\n &self,\n _desired_state: &serde_json::Value,\n _existing_state: &serde_json::Value,\n ) -> Result {\n // The Python target connector doesn't support state update yet.\n 
Ok(SetupStateCompatibility::Compatible)\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n Python::with_gil(|py| -> Result {\n let result = self\n .py_target_connector\n .call_method(py, \"describe_resource\", (pythonize(py, key)?,), None)\n .to_result_with_py_trace(py)?;\n let description = result.extract::(py)?;\n Ok(description)\n })\n }\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n // Filter the setup changes that are not NoChange, and flatten to\n // `list[tuple[key, list[stale_existing_states | None], desired_state | None]]` for Python.\n let mut setup_changes = Vec::new();\n for item in setup_status.into_iter() {\n let decoded_setup_status = (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n if ::change_type(decoded_setup_status)\n != setup::SetupChangeType::NoChange\n {\n setup_changes.push((\n item.key,\n &decoded_setup_status.stale_existing_states,\n &decoded_setup_status.desired_state,\n ));\n }\n }\n\n if setup_changes.is_empty() {\n return Ok(());\n }\n\n // Call the `apply_setup_changes_async()` method.\n let py_exec_ctx = context\n .py_exec_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Python execution context is missing\"))?\n .clone();\n let py_result = Python::with_gil(move |py| -> Result<_> {\n let result_coro = self\n .py_target_connector\n .call_method(\n py,\n \"apply_setup_changes_async\",\n (pythonize(py, &setup_changes)?,),\n None,\n )\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n Python::with_gil(move |py| 
py_result.to_result_with_py_trace(py))?;\n\n Ok(())\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec<\n interface::ExportTargetMutationWithContext<'async_trait, dyn Any + Send + Sync>,\n >,\n ) -> Result<()> {\n if mutations.is_empty() {\n return Ok(());\n }\n\n let py_result = Python::with_gil(|py| -> Result<_> {\n // Create a `list[tuple[export_ctx, list[tuple[key, value | None]]]]` for Python, and collect `py_exec_ctx`.\n let mut py_args = Vec::with_capacity(mutations.len());\n let mut py_exec_ctx: Option<&Arc> = None;\n for mutation in mutations.into_iter() {\n // Downcast export_context to PyTargetExecutorContext.\n let export_context = (mutation.export_context as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?;\n\n let mut flattened_mutations = Vec::with_capacity(\n mutation.mutation.upserts.len() + mutation.mutation.deletes.len(),\n );\n for upsert in mutation.mutation.upserts.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &upsert.key.into())?,\n py::field_values_to_py_object(py, upsert.value.fields.iter())?,\n ));\n }\n for delete in mutation.mutation.deletes.into_iter() {\n flattened_mutations.push((\n py::value_to_py_object(py, &delete.key.into())?,\n py.None().into_bound(py),\n ));\n }\n py_args.push((\n &export_context.py_export_ctx,\n PyList::new(py, flattened_mutations)?.into_any(),\n ));\n py_exec_ctx = py_exec_ctx.or(Some(&export_context.py_exec_ctx));\n }\n let py_exec_ctx = py_exec_ctx.ok_or_else(invariance_violation)?;\n\n let result_coro = self\n .py_target_connector\n .call_method(py, \"mutate_async\", (py_args,), None)\n .to_result_with_py_trace(py)?;\n let task_locals =\n pyo3_async_runtimes::TaskLocals::new(py_exec_ctx.event_loop.bind(py).clone());\n Ok(pyo3_async_runtimes::into_future_with_locals(\n &task_locals,\n result_coro.into_bound(py),\n )?)\n })?\n .await;\n\n Python::with_gil(move |py| py_result.to_result_with_py_trace(py))?;\n Ok(())\n }\n}\n"], 
["/cocoindex/src/setup/db_metadata.rs", "use crate::prelude::*;\n\nuse super::{ResourceSetupInfo, ResourceSetupStatus, SetupChangeType, StateChange};\nuse crate::utils::db::WriteAction;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\n\nconst SETUP_METADATA_TABLE_NAME: &str = \"cocoindex_setup_metadata\";\npub const FLOW_VERSION_RESOURCE_TYPE: &str = \"__FlowVersion\";\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SetupMetadataRecord {\n pub flow_name: String,\n // e.g. \"Flow\", \"SourceTracking\", \"Target:{TargetType}\"\n pub resource_type: String,\n pub key: serde_json::Value,\n pub state: Option,\n pub staging_changes: sqlx::types::Json>>,\n}\n\npub fn parse_flow_version(state: &Option) -> Option {\n match state {\n Some(serde_json::Value::Number(n)) => n.as_u64(),\n _ => None,\n }\n}\n\n/// Returns None if metadata table doesn't exist.\npub async fn read_setup_metadata(pool: &PgPool) -> Result>> {\n let mut db_conn = pool.acquire().await?;\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME}\",\n );\n let metadata = sqlx::query_as(&query_str).fetch_all(&mut *db_conn).await;\n let result = match metadata {\n Ok(metadata) => Some(metadata),\n Err(err) => {\n let exists: Option = sqlx::query_scalar(\n \"SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = $1)\",\n )\n .bind(SETUP_METADATA_TABLE_NAME)\n .fetch_one(&mut *db_conn)\n .await?;\n if !exists.unwrap_or(false) {\n None\n } else {\n return Err(err.into());\n }\n }\n };\n Ok(result)\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct ResourceTypeKey {\n pub resource_type: String,\n pub key: serde_json::Value,\n}\n\nimpl ResourceTypeKey {\n pub fn new(resource_type: String, key: serde_json::Value) -> Self {\n Self { resource_type, key }\n }\n}\n\nstatic VERSION_RESOURCE_TYPE_ID: LazyLock = LazyLock::new(|| ResourceTypeKey {\n resource_type: FLOW_VERSION_RESOURCE_TYPE.to_string(),\n key: 
serde_json::Value::Null,\n});\n\nasync fn read_metadata_records_for_flow(\n flow_name: &str,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT flow_name, resource_type, key, state, staging_changes FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1\",\n );\n let metadata: Vec = sqlx::query_as(&query_str)\n .bind(flow_name)\n .fetch_all(db_executor)\n .await?;\n let result = metadata\n .into_iter()\n .map(|m| {\n (\n ResourceTypeKey {\n resource_type: m.resource_type.clone(),\n key: m.key.clone(),\n },\n m,\n )\n })\n .collect();\n Ok(result)\n}\n\nasync fn read_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT state FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n let state: Option = sqlx::query_scalar(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .fetch_optional(db_executor)\n .await?;\n Ok(state)\n}\n\nasync fn upsert_staging_changes(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n staging_changes: Vec>,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, staging_changes) VALUES ($1, $2, $3, $4)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET staging_changes = $4 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(staging_changes))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn upsert_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n state: &serde_json::Value,\n action: WriteAction,\n 
db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {SETUP_METADATA_TABLE_NAME} (flow_name, resource_type, key, state, staging_changes) VALUES ($1, $2, $3, $4, $5)\",\n ),\n WriteAction::Update => format!(\n \"UPDATE {SETUP_METADATA_TABLE_NAME} SET state = $4, staging_changes = $5 WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n ),\n };\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .bind(sqlx::types::Json(state))\n .bind(sqlx::types::Json(Vec::::new()))\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\nasync fn delete_state(\n flow_name: &str,\n type_id: &ResourceTypeKey,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {SETUP_METADATA_TABLE_NAME} WHERE flow_name = $1 AND resource_type = $2 AND key = $3\",\n );\n sqlx::query(&query_str)\n .bind(flow_name)\n .bind(&type_id.resource_type)\n .bind(&type_id.key)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub struct StateUpdateInfo {\n pub desired_state: Option,\n pub legacy_key: Option,\n}\n\nimpl StateUpdateInfo {\n pub fn new(\n desired_state: Option<&impl Serialize>,\n legacy_key: Option,\n ) -> Result {\n Ok(Self {\n desired_state: desired_state\n .as_ref()\n .map(serde_json::to_value)\n .transpose()?,\n legacy_key,\n })\n }\n}\n\npub async fn stage_changes_for_flow(\n flow_name: &str,\n seen_metadata_version: Option,\n resource_update_info: &HashMap,\n pool: &PgPool,\n) -> Result {\n let mut txn = pool.begin().await?;\n let mut existing_records = read_metadata_records_for_flow(flow_name, &mut *txn).await?;\n let latest_metadata_version = existing_records\n .get(&VERSION_RESOURCE_TYPE_ID)\n .and_then(|m| parse_flow_version(&m.state));\n if seen_metadata_version < latest_metadata_version {\n return Err(ApiError::new(\n \"seen newer version in the 
metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n let new_metadata_version = seen_metadata_version.unwrap_or_default() + 1;\n upsert_state(\n flow_name,\n &VERSION_RESOURCE_TYPE_ID,\n &serde_json::Value::Number(new_metadata_version.into()),\n if latest_metadata_version.is_some() {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n &mut *txn,\n )\n .await?;\n\n for (type_id, update_info) in resource_update_info {\n let existing = existing_records.remove(type_id);\n let change = match &update_info.desired_state {\n Some(desired_state) => StateChange::Upsert(desired_state.clone()),\n None => StateChange::Delete,\n };\n let mut new_staging_changes = vec![];\n if let Some(legacy_key) = &update_info.legacy_key {\n if let Some(legacy_record) = existing_records.remove(legacy_key) {\n new_staging_changes.extend(legacy_record.staging_changes.0);\n delete_state(flow_name, legacy_key, &mut *txn).await?;\n }\n }\n let (action, existing_staging_changes) = match existing {\n Some(existing) => {\n let existing_staging_changes = existing.staging_changes.0;\n if existing_staging_changes.iter().all(|c| c != &change) {\n new_staging_changes.push(change);\n }\n (WriteAction::Update, existing_staging_changes)\n }\n None => {\n if update_info.desired_state.is_some() {\n new_staging_changes.push(change);\n }\n (WriteAction::Insert, vec![])\n }\n };\n if !new_staging_changes.is_empty() {\n upsert_staging_changes(\n flow_name,\n type_id,\n [existing_staging_changes, new_staging_changes].concat(),\n &mut *txn,\n action,\n )\n .await?;\n }\n }\n txn.commit().await?;\n Ok(new_metadata_version)\n}\n\npub async fn commit_changes_for_flow(\n flow_name: &str,\n curr_metadata_version: u64,\n state_updates: &HashMap,\n delete_version: bool,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n let latest_metadata_version =\n parse_flow_version(&read_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?);\n if latest_metadata_version != 
Some(curr_metadata_version) {\n return Err(ApiError::new(\n \"seen newer version in the metadata table\",\n StatusCode::CONFLICT,\n ))?;\n }\n for (type_id, update_info) in state_updates.iter() {\n match &update_info.desired_state {\n Some(desired_state) => {\n upsert_state(\n flow_name,\n type_id,\n desired_state,\n WriteAction::Update,\n &mut *txn,\n )\n .await?;\n }\n None => {\n delete_state(flow_name, type_id, &mut *txn).await?;\n }\n }\n }\n if delete_version {\n delete_state(flow_name, &VERSION_RESOURCE_TYPE_ID, &mut *txn).await?;\n }\n txn.commit().await?;\n Ok(())\n}\n\n#[derive(Debug)]\npub struct MetadataTableSetup {\n pub metadata_table_missing: bool,\n}\n\nimpl MetadataTableSetup {\n pub fn into_setup_info(self) -> ResourceSetupInfo<(), (), MetadataTableSetup> {\n ResourceSetupInfo {\n key: (),\n state: None,\n description: \"CocoIndex Metadata Table\".to_string(),\n setup_status: Some(self),\n legacy_key: None,\n }\n }\n}\n\nimpl ResourceSetupStatus for MetadataTableSetup {\n fn describe_changes(&self) -> Vec {\n if self.metadata_table_missing {\n vec![setup::ChangeDescription::Action(format!(\n \"Create the cocoindex metadata table {SETUP_METADATA_TABLE_NAME}\"\n ))]\n } else {\n vec![]\n }\n }\n\n fn change_type(&self) -> SetupChangeType {\n if self.metadata_table_missing {\n SetupChangeType::Create\n } else {\n SetupChangeType::NoChange\n }\n }\n}\n\nimpl MetadataTableSetup {\n pub async fn apply_change(&self) -> Result<()> {\n if !self.metadata_table_missing {\n return Ok(());\n }\n let lib_context = get_lib_context()?;\n let pool = lib_context.require_builtin_db_pool()?;\n let query_str = format!(\n \"CREATE TABLE IF NOT EXISTS {SETUP_METADATA_TABLE_NAME} (\n flow_name TEXT NOT NULL,\n resource_type TEXT NOT NULL,\n key JSONB NOT NULL,\n state JSONB,\n staging_changes JSONB NOT NULL,\n\n PRIMARY KEY (flow_name, resource_type, key)\n )\n \",\n );\n sqlx::query(&query_str).execute(pool).await?;\n Ok(())\n }\n}\n"], 
["/cocoindex/src/ops/factory_bases.rs", "use crate::prelude::*;\nuse crate::setup::ResourceSetupStatus;\nuse std::fmt::Debug;\nuse std::hash::Hash;\n\nuse super::interface::*;\nuse super::registry::*;\nuse crate::api_bail;\nuse crate::api_error;\nuse crate::base::schema::*;\nuse crate::base::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\nuse crate::setup;\n// SourceFactoryBase\npub struct ResolvedOpArg {\n pub name: String,\n pub typ: EnrichedValueType,\n pub idx: usize,\n}\n\npub trait ResolvedOpArgExt: Sized {\n fn expect_type(self, expected_type: &ValueType) -> Result;\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value>;\n fn take_value(&self, args: &mut [value::Value]) -> Result;\n}\n\nimpl ResolvedOpArgExt for ResolvedOpArg {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n if &self.typ.typ != expected_type {\n api_bail!(\n \"Expected argument `{}` to be of type `{}`, got `{}`\",\n self.name,\n expected_type,\n self.typ.typ\n );\n }\n Ok(self)\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(&args[self.idx])\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n if self.idx >= args.len() {\n api_bail!(\n \"Two few arguments, {} provided, expected at least {} for `{}`\",\n args.len(),\n self.idx + 1,\n self.name\n );\n }\n Ok(std::mem::take(&mut args[self.idx]))\n }\n}\n\nimpl ResolvedOpArgExt for Option {\n fn expect_type(self, expected_type: &ValueType) -> Result {\n self.map(|arg| arg.expect_type(expected_type)).transpose()\n }\n\n fn value<'a>(&self, args: &'a [value::Value]) -> Result<&'a value::Value> {\n Ok(self\n .as_ref()\n .map(|arg| arg.value(args))\n .transpose()?\n .unwrap_or(&value::Value::Null))\n }\n\n fn take_value(&self, args: &mut [value::Value]) -> Result {\n Ok(self\n 
.as_ref()\n .map(|arg| arg.take_value(args))\n .transpose()?\n .unwrap_or(value::Value::Null))\n }\n}\n\npub struct OpArgsResolver<'a> {\n args: &'a [OpArgSchema],\n num_positional_args: usize,\n next_positional_idx: usize,\n remaining_kwargs: HashMap<&'a str, usize>,\n}\n\nimpl<'a> OpArgsResolver<'a> {\n pub fn new(args: &'a [OpArgSchema]) -> Result {\n let mut num_positional_args = 0;\n let mut kwargs = HashMap::new();\n for (idx, arg) in args.iter().enumerate() {\n if let Some(name) = &arg.name.0 {\n kwargs.insert(name.as_str(), idx);\n } else {\n if !kwargs.is_empty() {\n api_bail!(\"Positional arguments must be provided before keyword arguments\");\n }\n num_positional_args += 1;\n }\n }\n Ok(Self {\n args,\n num_positional_args,\n next_positional_idx: 0,\n remaining_kwargs: kwargs,\n })\n }\n\n pub fn next_optional_arg(&mut self, name: &str) -> Result> {\n let idx = if let Some(idx) = self.remaining_kwargs.remove(name) {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\"`{name}` is provided as both positional and keyword arguments\");\n } else {\n Some(idx)\n }\n } else if self.next_positional_idx < self.num_positional_args {\n let idx = self.next_positional_idx;\n self.next_positional_idx += 1;\n Some(idx)\n } else {\n None\n };\n Ok(idx.map(|idx| ResolvedOpArg {\n name: name.to_string(),\n typ: self.args[idx].value_type.clone(),\n idx,\n }))\n }\n\n pub fn next_arg(&mut self, name: &str) -> Result {\n Ok(self\n .next_optional_arg(name)?\n .ok_or_else(|| api_error!(\"Required argument `{name}` is missing\",))?)\n }\n\n pub fn done(self) -> Result<()> {\n if self.next_positional_idx < self.num_positional_args {\n api_bail!(\n \"Expected {} positional arguments, got {}\",\n self.next_positional_idx,\n self.num_positional_args\n );\n }\n if !self.remaining_kwargs.is_empty() {\n api_bail!(\n \"Unexpected keyword arguments: {}\",\n self.remaining_kwargs\n .keys()\n .map(|k| format!(\"`{k}`\"))\n .collect::>()\n .join(\", \")\n )\n }\n 
Ok(())\n }\n\n pub fn get_analyze_value(&self, resolved_arg: &ResolvedOpArg) -> &AnalyzedValueMapping {\n &self.args[resolved_arg.idx].analyzed_value\n }\n}\n\n#[async_trait]\npub trait SourceFactoryBase: SourceFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn get_output_schema(\n &self,\n spec: &Self::Spec,\n context: &FlowInstanceContext,\n ) -> Result;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::Source(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SourceFactory for T {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let output_schema = self.get_output_schema(&spec, &context).await?;\n let executor = self.build_executor(spec, context);\n Ok((output_schema, executor))\n }\n}\n\n// SimpleFunctionFactoryBase\n\n#[async_trait]\npub trait SimpleFunctionFactoryBase: SimpleFunctionFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type ResolvedArgs: Send + Sync;\n\n fn name(&self) -> &str;\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Self::Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)>;\n\n async fn build_executor(\n self: Arc,\n spec: Self::Spec,\n resolved_input_schema: Self::ResolvedArgs,\n context: Arc,\n ) -> Result>;\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::SimpleFunction(Arc::new(self)),\n )\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionFactory for T {\n async fn 
build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )> {\n let spec: T::Spec = serde_json::from_value(spec)?;\n let mut args_resolver = OpArgsResolver::new(&input_schema)?;\n let (resolved_input_schema, output_schema) = self\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n args_resolver.done()?;\n let executor = self.build_executor(spec, resolved_input_schema, context);\n Ok((output_schema, executor))\n }\n}\n\npub struct TypedExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: F::Key,\n pub desired_setup_state: F::SetupState,\n}\npub struct TypedExportDataCollectionSpec {\n pub name: String,\n pub spec: F::Spec,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\npub struct TypedResourceSetupChangeItem<'a, F: StorageFactoryBase + ?Sized> {\n pub key: F::Key,\n pub setup_status: &'a F::SetupStatus,\n}\n\n#[async_trait]\npub trait StorageFactoryBase: ExportTargetFactory + Send + Sync + 'static {\n type Spec: DeserializeOwned + Send + Sync;\n type DeclarationSpec: DeserializeOwned + Send + Sync;\n type Key: Debug + Clone + Serialize + DeserializeOwned + Eq + Hash + Send + Sync;\n type SetupState: Debug + Clone + Serialize + DeserializeOwned + Send + Sync;\n type SetupStatus: ResourceSetupStatus;\n type ExportContext: Send + Sync + 'static;\n\n fn name(&self) -> &str;\n\n async fn build(\n self: Arc,\n data_collections: Vec>,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec>,\n Vec<(Self::Key, Self::SetupState)>,\n )>;\n\n /// Deserialize the setup key from a JSON value.\n /// You can override this method to provide a custom deserialization logic, e.g. 
to perform backward compatible deserialization.\n fn deserialize_setup_key(key: serde_json::Value) -> Result {\n Ok(serde_json::from_value(key)?)\n }\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: Self::Key,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &Self::SetupState,\n existing_state: &Self::SetupState,\n ) -> Result;\n\n fn describe_resource(&self, key: &Self::Key) -> Result;\n\n fn extract_additional_key(\n &self,\n _key: &value::KeyValue,\n _value: &value::FieldValues,\n _export_context: &Self::ExportContext,\n ) -> Result {\n Ok(serde_json::Value::Null)\n }\n\n fn register(self, registry: &mut ExecutorFactoryRegistry) -> Result<()>\n where\n Self: Sized,\n {\n registry.register(\n self.name().to_string(),\n ExecutorFactory::ExportTarget(Arc::new(self)),\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[async_trait]\nimpl ExportTargetFactory for T {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )> {\n let (data_coll_output, decl_output) = StorageFactoryBase::build(\n self,\n data_collections\n .into_iter()\n .map(|d| {\n anyhow::Ok(TypedExportDataCollectionSpec {\n name: d.name,\n spec: serde_json::from_value(d.spec)?,\n key_fields_schema: d.key_fields_schema,\n value_fields_schema: d.value_fields_schema,\n index_options: d.index_options,\n })\n })\n .collect::>>()?,\n declarations\n .into_iter()\n .map(|d| anyhow::Ok(serde_json::from_value(d)?))\n .collect::>>()?,\n context,\n )\n .await?;\n\n let data_coll_output = data_coll_output\n .into_iter()\n .map(|d| {\n 
Ok(interface::ExportDataCollectionBuildOutput {\n export_context: async move {\n Ok(d.export_context.await? as Arc)\n }\n .boxed(),\n setup_key: serde_json::to_value(d.setup_key)?,\n desired_setup_state: serde_json::to_value(d.desired_setup_state)?,\n })\n })\n .collect::>>()?;\n let decl_output = decl_output\n .into_iter()\n .map(|(key, state)| Ok((serde_json::to_value(key)?, serde_json::to_value(state)?)))\n .collect::>>()?;\n Ok((data_coll_output, decl_output))\n }\n\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n flow_instance_ctx: Arc,\n ) -> Result> {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n let desired_state: Option = desired_state\n .map(|v| serde_json::from_value(v.clone()))\n .transpose()?;\n let existing_states = from_json_combined_state(existing_states)?;\n let setup_status = StorageFactoryBase::check_setup_status(\n self,\n key,\n desired_state,\n existing_states,\n flow_instance_ctx,\n )\n .await?;\n Ok(Box::new(setup_status))\n }\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n StorageFactoryBase::describe_resource(self, &key)\n }\n\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result {\n let key: T::Key = Self::deserialize_setup_key(key.clone())?;\n Ok(serde_json::to_value(key)?)\n }\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result {\n let result = StorageFactoryBase::check_state_compatibility(\n self,\n &serde_json::from_value(desired_state.clone())?,\n &serde_json::from_value(existing_state.clone())?,\n )?;\n Ok(result)\n }\n\n fn extract_additional_key(\n &self,\n key: &value::KeyValue,\n value: &value::FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result {\n StorageFactoryBase::extract_additional_key(\n self,\n key,\n value,\n 
export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n )\n }\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()> {\n let mutations = mutations\n .into_iter()\n .map(|m| {\n anyhow::Ok(ExportTargetMutationWithContext {\n mutation: m.mutation,\n export_context: m\n .export_context\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>()?;\n StorageFactoryBase::apply_mutation(self, mutations).await\n }\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()> {\n StorageFactoryBase::apply_setup_changes(\n self,\n setup_status\n .into_iter()\n .map(|item| -> anyhow::Result<_> {\n Ok(TypedResourceSetupChangeItem {\n key: serde_json::from_value(item.key.clone())?,\n setup_status: (item.setup_status as &dyn Any)\n .downcast_ref::()\n .ok_or_else(invariance_violation)?,\n })\n })\n .collect::>>()?,\n context,\n )\n .await\n }\n}\nfn from_json_combined_state(\n existing_states: setup::CombinedState,\n) -> Result> {\n Ok(setup::CombinedState {\n current: existing_states\n .current\n .map(|v| serde_json::from_value(v))\n .transpose()?,\n staging: existing_states\n .staging\n .into_iter()\n .map(|v| {\n anyhow::Ok(match v {\n setup::StateChange::Upsert(v) => {\n setup::StateChange::Upsert(serde_json::from_value(v)?)\n }\n setup::StateChange::Delete => setup::StateChange::Delete,\n })\n })\n .collect::>()?,\n legacy_state_key: existing_states.legacy_state_key,\n })\n}\n"], ["/cocoindex/src/execution/row_indexer.rs", "use crate::prelude::*;\n\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, HashSet};\n\nuse super::db_tracking::{self, TrackedTargetKeyInfo, read_source_tracking_info_for_processing};\nuse super::db_tracking_setup;\nuse super::evaluator::{\n EvaluateSourceEntryOutput, SourceRowEvaluationContext, evaluate_source_entry,\n};\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, StoredMemoizationInfo};\nuse 
super::stats;\n\nuse crate::base::value::{self, FieldValues, KeyValue};\nuse crate::builder::plan::*;\nuse crate::ops::interface::{\n ExportTargetMutation, ExportTargetUpsertEntry, Ordinal, SourceExecutorGetOptions,\n};\nuse crate::utils::db::WriteAction;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\npub fn extract_primary_key(\n primary_key_def: &AnalyzedPrimaryKeyDef,\n record: &FieldValues,\n) -> Result {\n match primary_key_def {\n AnalyzedPrimaryKeyDef::Fields(fields) => {\n KeyValue::from_values(fields.iter().map(|field| &record.fields[*field]))\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]\npub enum SourceVersionKind {\n #[default]\n UnknownLogic,\n DifferentLogic,\n CurrentLogic,\n NonExistence,\n}\n\n#[derive(Debug, Clone, Default)]\npub struct SourceVersion {\n pub ordinal: Ordinal,\n pub kind: SourceVersionKind,\n}\n\nimpl SourceVersion {\n pub fn from_stored(\n stored_ordinal: Option,\n stored_fp: &Option>,\n curr_fp: Fingerprint,\n ) -> Self {\n Self {\n ordinal: Ordinal(stored_ordinal),\n kind: match &stored_fp {\n Some(stored_fp) => {\n if stored_fp.as_slice() == curr_fp.0.as_slice() {\n SourceVersionKind::CurrentLogic\n } else {\n SourceVersionKind::DifferentLogic\n }\n }\n None => SourceVersionKind::UnknownLogic,\n },\n }\n }\n\n pub fn from_stored_processing_info(\n info: &db_tracking::SourceTrackingInfoForProcessing,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_stored_precommit_info(\n info: &db_tracking::SourceTrackingInfoForPrecommit,\n curr_fp: Fingerprint,\n ) -> Self {\n Self::from_stored(\n info.processed_source_ordinal,\n &info.process_logic_fingerprint,\n curr_fp,\n )\n }\n\n pub fn from_current_with_ordinal(ordinal: Ordinal) -> Self {\n Self {\n ordinal,\n kind: SourceVersionKind::CurrentLogic,\n }\n }\n\n pub fn from_current_data(data: &interface::SourceData) -> 
Self {\n let kind = match &data.value {\n interface::SourceValue::Existence(_) => SourceVersionKind::CurrentLogic,\n interface::SourceValue::NonExistence => SourceVersionKind::NonExistence,\n };\n Self {\n ordinal: data.ordinal,\n kind,\n }\n }\n\n pub fn should_skip(\n &self,\n target: &SourceVersion,\n update_stats: Option<&stats::UpdateStats>,\n ) -> bool {\n // Ordinal indicates monotonic invariance - always respect ordinal order\n // Never process older ordinals to maintain consistency\n let should_skip = match (self.ordinal.0, target.ordinal.0) {\n (Some(existing_ordinal), Some(target_ordinal)) => {\n // Skip if target ordinal is older, or same ordinal with same/older logic version\n existing_ordinal > target_ordinal\n || (existing_ordinal == target_ordinal && self.kind >= target.kind)\n }\n _ => false,\n };\n if should_skip {\n if let Some(update_stats) = update_stats {\n update_stats.num_no_change.inc(1);\n }\n }\n should_skip\n }\n}\n\npub enum SkippedOr {\n Normal(T),\n Skipped(SourceVersion),\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\nstruct TargetKeyPair {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Default)]\nstruct TrackingInfoForTarget<'a> {\n export_op: Option<&'a AnalyzedExportOp>,\n\n // Existing keys info. 
Keyed by target key.\n // Will be removed after new rows for the same key are added into `new_staging_keys_info` and `mutation.upserts`,\n // hence all remaining ones are to be deleted.\n existing_staging_keys_info: HashMap)>>,\n existing_keys_info: HashMap)>>,\n\n // New keys info for staging.\n new_staging_keys_info: Vec,\n\n // Mutation to apply to the target storage.\n mutation: ExportTargetMutation,\n}\n\n#[derive(Debug)]\nstruct PrecommitData<'a> {\n evaluate_output: &'a EvaluateSourceEntryOutput,\n memoization_info: &'a StoredMemoizationInfo,\n}\nstruct PrecommitMetadata {\n source_entry_exists: bool,\n process_ordinal: i64,\n existing_process_ordinal: Option,\n new_target_keys: db_tracking::TrackedTargetKeyForSource,\n}\nstruct PrecommitOutput {\n metadata: PrecommitMetadata,\n target_mutations: HashMap,\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fp: Fingerprint,\n data: Option>,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n export_ops: &[AnalyzedExportOp],\n export_ops_exec_ctx: &[exec_ctx::ExportOpExecutionContext],\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n if let Some(tracking_info) = &tracking_info {\n let existing_source_version =\n SourceVersion::from_stored_precommit_info(tracking_info, logic_fp);\n if existing_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_source_version));\n }\n }\n let tracking_info_exists = tracking_info.is_some();\n let process_ordinal = (tracking_info\n .as_ref()\n .map(|info| info.max_process_ordinal)\n .unwrap_or(0)\n + 1)\n 
.max(process_timestamp.timestamp_millis());\n let existing_process_ordinal = tracking_info.as_ref().and_then(|info| info.process_ordinal);\n\n let mut tracking_info_for_targets = HashMap::::new();\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default()\n .export_op = Some(export_op);\n }\n\n // Collect `tracking_info_for_targets` from existing tracking info.\n if let Some(info) = tracking_info {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n for (target_id, keys_info) in staging_target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_staging_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n\n if let Some(sqlx::types::Json(target_keys)) = info.target_keys {\n for (target_id, keys_info) in target_keys.into_iter() {\n let target_info = tracking_info_for_targets.entry(target_id).or_default();\n for key_info in keys_info.into_iter() {\n target_info\n .existing_keys_info\n .entry(TargetKeyPair {\n key: key_info.key,\n additional_key: key_info.additional_key,\n })\n .or_default()\n .push((key_info.process_ordinal, key_info.fingerprint));\n }\n }\n }\n }\n\n let mut new_target_keys_info = db_tracking::TrackedTargetKeyForSource::default();\n if let Some(data) = &data {\n for (export_op, export_op_exec_ctx) in\n std::iter::zip(export_ops.iter(), export_ops_exec_ctx.iter())\n {\n let target_info = tracking_info_for_targets\n .entry(export_op_exec_ctx.target_id)\n .or_default();\n let mut keys_info = Vec::new();\n let collected_values =\n &data.evaluate_output.collected_values[export_op.input.collector_idx as usize];\n for value in collected_values.iter() {\n let 
primary_key = extract_primary_key(&export_op.primary_key_def, value)?;\n let primary_key_json = serde_json::to_value(&primary_key)?;\n\n let mut field_values = FieldValues {\n fields: Vec::with_capacity(export_op.value_fields.len()),\n };\n for field in export_op.value_fields.iter() {\n field_values\n .fields\n .push(value.fields[*field as usize].clone());\n }\n let additional_key = export_op.export_target_factory.extract_additional_key(\n &primary_key,\n &field_values,\n export_op.export_context.as_ref(),\n )?;\n let target_key_pair = TargetKeyPair {\n key: primary_key_json,\n additional_key,\n };\n let existing_target_keys = target_info.existing_keys_info.remove(&target_key_pair);\n let existing_staging_target_keys = target_info\n .existing_staging_keys_info\n .remove(&target_key_pair);\n\n let curr_fp = if !export_op.value_stable {\n Some(\n Fingerprinter::default()\n .with(&field_values)?\n .into_fingerprint(),\n )\n } else {\n None\n };\n if existing_target_keys\n .as_ref()\n .map(|keys| !keys.is_empty() && keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(false)\n && existing_staging_target_keys\n .map(|keys| keys.iter().all(|(_, fp)| fp == &curr_fp))\n .unwrap_or(true)\n {\n // Already exists, with exactly the same value fingerprint.\n // Nothing need to be changed, except carrying over the existing target keys info.\n let (existing_ordinal, existing_fp) = existing_target_keys\n .ok_or_else(invariance_violation)?\n .into_iter()\n .next()\n .ok_or_else(invariance_violation)?;\n keys_info.push(TrackedTargetKeyInfo {\n key: target_key_pair.key,\n additional_key: target_key_pair.additional_key,\n process_ordinal: existing_ordinal,\n fingerprint: existing_fp,\n });\n } else {\n // Entry with new value. 
Needs to be upserted.\n let tracked_target_key = TrackedTargetKeyInfo {\n key: target_key_pair.key.clone(),\n additional_key: target_key_pair.additional_key.clone(),\n process_ordinal,\n fingerprint: curr_fp,\n };\n target_info.mutation.upserts.push(ExportTargetUpsertEntry {\n key: primary_key,\n additional_key: target_key_pair.additional_key,\n value: field_values,\n });\n target_info\n .new_staging_keys_info\n .push(tracked_target_key.clone());\n keys_info.push(tracked_target_key);\n }\n }\n new_target_keys_info.push((export_op_exec_ctx.target_id, keys_info));\n }\n }\n\n let mut new_staging_target_keys = db_tracking::TrackedTargetKeyForSource::default();\n let mut target_mutations = HashMap::with_capacity(export_ops.len());\n for (target_id, target_tracking_info) in tracking_info_for_targets.into_iter() {\n let legacy_keys: HashSet = target_tracking_info\n .existing_keys_info\n .into_keys()\n .chain(target_tracking_info.existing_staging_keys_info.into_keys())\n .collect();\n\n let mut new_staging_keys_info = target_tracking_info.new_staging_keys_info;\n // Add tracking info for deletions.\n new_staging_keys_info.extend(legacy_keys.iter().map(|key| TrackedTargetKeyInfo {\n key: key.key.clone(),\n additional_key: key.additional_key.clone(),\n process_ordinal,\n fingerprint: None,\n }));\n new_staging_target_keys.push((target_id, new_staging_keys_info));\n\n if let Some(export_op) = target_tracking_info.export_op {\n let mut mutation = target_tracking_info.mutation;\n mutation.deletes.reserve(legacy_keys.len());\n for legacy_key in legacy_keys.into_iter() {\n let key = value::Value::::from_json(\n legacy_key.key,\n &export_op.primary_key_type,\n )?\n .as_key()?;\n mutation.deletes.push(interface::ExportTargetDeleteEntry {\n key,\n additional_key: legacy_key.additional_key,\n });\n }\n target_mutations.insert(target_id, mutation);\n }\n }\n\n db_tracking::precommit_source_tracking_info(\n source_id,\n source_key_json,\n process_ordinal,\n new_staging_target_keys,\n 
data.as_ref().map(|data| data.memoization_info),\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n\n txn.commit().await?;\n\n Ok(SkippedOr::Normal(PrecommitOutput {\n metadata: PrecommitMetadata {\n source_entry_exists: data.is_some(),\n process_ordinal,\n existing_process_ordinal,\n new_target_keys: new_target_keys_info,\n },\n target_mutations,\n }))\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n logic_fingerprint: &[u8],\n precommit_metadata: PrecommitMetadata,\n process_timestamp: &chrono::DateTime,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n pool: &PgPool,\n) -> Result<()> {\n let mut txn = pool.begin().await?;\n\n let tracking_info = db_tracking::read_source_tracking_info_for_commit(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n let tracking_info_exists = tracking_info.is_some();\n if tracking_info.as_ref().and_then(|info| info.process_ordinal)\n >= Some(precommit_metadata.process_ordinal)\n {\n return Ok(());\n }\n\n let cleaned_staging_target_keys = tracking_info\n .map(|info| {\n let sqlx::types::Json(staging_target_keys) = info.staging_target_keys;\n staging_target_keys\n .into_iter()\n .filter_map(|(target_id, target_keys)| {\n let cleaned_target_keys: Vec<_> = target_keys\n .into_iter()\n .filter(|key_info| {\n Some(key_info.process_ordinal)\n > precommit_metadata.existing_process_ordinal\n && key_info.process_ordinal != precommit_metadata.process_ordinal\n })\n .collect();\n if !cleaned_target_keys.is_empty() {\n Some((target_id, cleaned_target_keys))\n } else {\n None\n }\n })\n .collect::>()\n })\n .unwrap_or_default();\n if !precommit_metadata.source_entry_exists && cleaned_staging_target_keys.is_empty() {\n // TODO: When we support distributed execution in the future, we'll need to leave a tombstone 
for a while\n // to prevent an earlier update causing the record reappear because of out-of-order processing.\n if tracking_info_exists {\n db_tracking::delete_source_tracking_info(\n source_id,\n source_key_json,\n db_setup,\n &mut *txn,\n )\n .await?;\n }\n } else {\n db_tracking::commit_source_tracking_info(\n source_id,\n source_key_json,\n cleaned_staging_target_keys,\n source_version.ordinal.into(),\n logic_fingerprint,\n precommit_metadata.process_ordinal,\n process_timestamp.timestamp_micros(),\n precommit_metadata.new_target_keys,\n db_setup,\n &mut *txn,\n if tracking_info_exists {\n WriteAction::Update\n } else {\n WriteAction::Insert\n },\n )\n .await?;\n }\n\n txn.commit().await?;\n\n Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn try_content_hash_optimization(\n source_id: i32,\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_key_json: &serde_json::Value,\n source_version: &SourceVersion,\n current_hash: &crate::utils::fingerprint::Fingerprint,\n tracking_info: &db_tracking::SourceTrackingInfoForProcessing,\n existing_version: &Option,\n db_setup: &db_tracking_setup::TrackingTableSetupState,\n update_stats: &stats::UpdateStats,\n pool: &PgPool,\n) -> Result>> {\n // Check if we can use content hash optimization\n if existing_version\n .as_ref()\n .is_none_or(|v| v.kind != SourceVersionKind::CurrentLogic)\n {\n return Ok(None);\n }\n\n if tracking_info\n .max_process_ordinal\n .zip(tracking_info.process_ordinal)\n .is_none_or(|(max_ord, proc_ord)| max_ord != proc_ord)\n {\n return Ok(None);\n }\n\n let existing_hash = tracking_info\n .memoization_info\n .as_ref()\n .and_then(|info| info.0.as_ref())\n .and_then(|stored_info| stored_info.content_hash.as_ref());\n\n if existing_hash != Some(current_hash) {\n return Ok(None);\n }\n\n // Content hash matches - try optimization\n let mut txn = pool.begin().await?;\n\n let current_tracking_info = db_tracking::read_source_tracking_info_for_precommit(\n source_id,\n source_key_json,\n 
db_setup,\n &mut *txn,\n )\n .await?;\n\n let Some(current_tracking_info) = current_tracking_info else {\n return Ok(None);\n };\n\n // Check 1: Same check as precommit - verify no newer version exists\n let current_source_version = SourceVersion::from_stored_precommit_info(\n ¤t_tracking_info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n if current_source_version.should_skip(source_version, Some(update_stats)) {\n return Ok(Some(SkippedOr::Skipped(current_source_version)));\n }\n\n // Check 2: Verify process_ordinal hasn't changed (no concurrent processing)\n let original_process_ordinal = tracking_info.process_ordinal;\n if current_tracking_info.process_ordinal != original_process_ordinal {\n return Ok(None);\n }\n\n // Safe to apply optimization - just update tracking table\n db_tracking::update_source_tracking_ordinal(\n source_id,\n source_key_json,\n source_version.ordinal.0,\n db_setup,\n &mut *txn,\n )\n .await?;\n\n txn.commit().await?;\n update_stats.num_no_change.inc(1);\n Ok(Some(SkippedOr::Normal(())))\n}\n\npub async fn evaluate_source_entry_with_memory(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n options: EvaluationMemoryOptions,\n pool: &PgPool,\n) -> Result> {\n let stored_info = if options.enable_cache || !options.evaluation_only {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n existing_tracking_info\n .and_then(|info| info.memoization_info.map(|info| info.0))\n .flatten()\n } else {\n None\n };\n let memory = EvaluationMemory::new(chrono::Utc::now(), stored_info, options);\n let source_value = src_eval_ctx\n .import_op\n .executor\n .get_value(\n src_eval_ctx.key,\n &SourceExecutorGetOptions {\n 
include_value: true,\n include_ordinal: false,\n },\n )\n .await?\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value not returned\"))?;\n let output = match source_value {\n interface::SourceValue::Existence(source_value) => {\n Some(evaluate_source_entry(src_eval_ctx, source_value, &memory).await?)\n }\n interface::SourceValue::NonExistence => None,\n };\n Ok(output)\n}\n\npub async fn update_source_row(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n source_value: interface::SourceValue,\n source_version: &SourceVersion,\n pool: &PgPool,\n update_stats: &stats::UpdateStats,\n) -> Result> {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let process_time = chrono::Utc::now();\n let source_id = setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id;\n\n // Phase 1: Check existing tracking info and apply optimizations\n let existing_tracking_info = read_source_tracking_info_for_processing(\n source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n let existing_version = match &existing_tracking_info {\n Some(info) => {\n let existing_version = SourceVersion::from_stored_processing_info(\n info,\n src_eval_ctx.plan.logic_fingerprint,\n );\n\n // First check ordinal-based skipping\n if existing_version.should_skip(source_version, Some(update_stats)) {\n return Ok(SkippedOr::Skipped(existing_version));\n }\n\n Some(existing_version)\n }\n None => None,\n };\n\n // Compute content hash once if needed for both optimization and evaluation\n let current_content_hash = match &source_value {\n interface::SourceValue::Existence(source_value) => Some(\n Fingerprinter::default()\n .with(source_value)?\n .into_fingerprint(),\n ),\n interface::SourceValue::NonExistence => None,\n };\n\n if let (Some(current_hash), Some(existing_tracking_info)) =\n (¤t_content_hash, &existing_tracking_info)\n {\n if let Some(optimization_result) = 
try_content_hash_optimization(\n source_id,\n src_eval_ctx,\n &source_key_json,\n source_version,\n current_hash,\n existing_tracking_info,\n &existing_version,\n &setup_execution_ctx.setup_state.tracking_table,\n update_stats,\n pool,\n )\n .await?\n {\n return Ok(optimization_result);\n }\n }\n\n let (output, stored_mem_info) = {\n let extracted_memoization_info = existing_tracking_info\n .and_then(|info| info.memoization_info)\n .and_then(|info| info.0);\n\n match source_value {\n interface::SourceValue::Existence(source_value) => {\n let evaluation_memory = EvaluationMemory::new(\n process_time,\n extracted_memoization_info,\n EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: false,\n },\n );\n\n let output =\n evaluate_source_entry(src_eval_ctx, source_value, &evaluation_memory).await?;\n let mut stored_info = evaluation_memory.into_stored()?;\n stored_info.content_hash = current_content_hash;\n\n (Some(output), stored_info)\n }\n interface::SourceValue::NonExistence => (None, Default::default()),\n }\n };\n\n // Phase 2 (precommit): Update with the memoization info and stage target keys.\n let precommit_output = precommit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n src_eval_ctx.plan.logic_fingerprint,\n output.as_ref().map(|scope_value| PrecommitData {\n evaluate_output: scope_value,\n memoization_info: &stored_mem_info,\n }),\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n &src_eval_ctx.plan.export_ops,\n &setup_execution_ctx.export_ops,\n update_stats,\n pool,\n )\n .await?;\n let precommit_output = match precommit_output {\n SkippedOr::Normal(output) => output,\n SkippedOr::Skipped(source_version) => return Ok(SkippedOr::Skipped(source_version)),\n };\n\n // Phase 3: Apply changes to the target storage, including upserting new target records and removing existing ones.\n let mut target_mutations = precommit_output.target_mutations;\n let apply_futs = src_eval_ctx\n .plan\n 
.export_op_groups\n .iter()\n .filter_map(|export_op_group| {\n let mutations_w_ctx: Vec<_> = export_op_group\n .op_idx\n .iter()\n .filter_map(|export_op_idx| {\n let export_op = &src_eval_ctx.plan.export_ops[*export_op_idx];\n target_mutations\n .remove(&setup_execution_ctx.export_ops[*export_op_idx].target_id)\n .filter(|m| !m.is_empty())\n .map(|mutation| interface::ExportTargetMutationWithContext {\n mutation,\n export_context: export_op.export_context.as_ref(),\n })\n })\n .collect();\n (!mutations_w_ctx.is_empty()).then(|| {\n export_op_group\n .target_factory\n .apply_mutation(mutations_w_ctx)\n })\n });\n\n // TODO: Handle errors.\n try_join_all(apply_futs).await?;\n\n // Phase 4: Update the tracking record.\n commit_source_tracking_info(\n source_id,\n &source_key_json,\n source_version,\n &src_eval_ctx.plan.logic_fingerprint.0,\n precommit_output.metadata,\n &process_time,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n )\n .await?;\n\n if let Some(existing_version) = existing_version {\n if output.is_some() {\n if !source_version.ordinal.is_available()\n || source_version.ordinal != existing_version.ordinal\n {\n update_stats.num_updates.inc(1);\n } else {\n update_stats.num_reprocesses.inc(1);\n }\n } else {\n update_stats.num_deletions.inc(1);\n }\n } else if output.is_some() {\n update_stats.num_insertions.inc(1);\n }\n\n Ok(SkippedOr::Normal(()))\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_github_actions_scenario_ordinal_behavior() {\n // Test ordinal-based behavior - should_skip only cares about ordinal monotonic invariance\n // Content hash optimization is handled at update_source_row level\n\n let processed_version = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // GitHub Actions checkout: timestamp changes but content same\n let after_checkout_version = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n 
kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip at should_skip level (ordinal is newer - monotonic invariance)\n // Content hash optimization happens at update_source_row level to update only tracking\n assert!(!processed_version.should_skip(&after_checkout_version, None));\n\n // Reverse case: if we somehow get an older ordinal, always skip\n assert!(after_checkout_version.should_skip(&processed_version, None));\n\n // Now simulate actual content change\n let content_changed_version = SourceVersion {\n ordinal: Ordinal(Some(3000)), // Even newer timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Should NOT skip processing (ordinal is newer)\n assert!(!processed_version.should_skip(&content_changed_version, None));\n }\n\n #[test]\n fn test_content_hash_computation() {\n use crate::base::value::{BasicValue, FieldValues, Value};\n use crate::utils::fingerprint::Fingerprinter;\n\n // Test that content hash is computed correctly from source data\n let source_data1 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data2 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"Hello\".into())),\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let source_data3 = FieldValues {\n fields: vec![\n Value::Basic(BasicValue::Str(\"World\".into())), // Different content\n Value::Basic(BasicValue::Int64(42)),\n ],\n };\n\n let hash1 = Fingerprinter::default()\n .with(&source_data1)\n .unwrap()\n .into_fingerprint();\n\n let hash2 = Fingerprinter::default()\n .with(&source_data2)\n .unwrap()\n .into_fingerprint();\n\n let hash3 = Fingerprinter::default()\n .with(&source_data3)\n .unwrap()\n .into_fingerprint();\n\n // Same content should produce same hash\n assert_eq!(hash1, hash2);\n\n // Different content should produce different hash\n assert_ne!(hash1, hash3);\n assert_ne!(hash2, hash3);\n }\n\n #[test]\n fn 
test_github_actions_content_hash_optimization_requirements() {\n // This test documents the exact requirements for GitHub Actions scenario\n // where file modification times change but content remains the same\n\n use crate::utils::fingerprint::Fingerprinter;\n\n // Simulate file content that remains the same across GitHub Actions checkout\n let file_content = \"const hello = 'world';\\nexport default hello;\";\n\n // Hash before checkout (original file)\n let hash_before_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Hash after checkout (same content, different timestamp)\n let hash_after_checkout = Fingerprinter::default()\n .with(&file_content)\n .unwrap()\n .into_fingerprint();\n\n // Content hashes must be identical for optimization to work\n assert_eq!(\n hash_before_checkout, hash_after_checkout,\n \"Content hash optimization requires identical hashes for same content\"\n );\n\n // Test with slightly different content (should produce different hashes)\n let modified_content = \"const hello = 'world!';\\nexport default hello;\"; // Added !\n let hash_modified = Fingerprinter::default()\n .with(&modified_content)\n .unwrap()\n .into_fingerprint();\n\n assert_ne!(\n hash_before_checkout, hash_modified,\n \"Different content should produce different hashes\"\n );\n }\n\n #[test]\n fn test_github_actions_ordinal_behavior_with_content_optimization() {\n // Test the complete GitHub Actions scenario:\n // 1. File processed with ordinal=1000, content_hash=ABC\n // 2. GitHub Actions checkout: ordinal=2000, content_hash=ABC (same content)\n // 3. 
Should use content hash optimization (update only tracking, skip evaluation)\n\n let original_processing = SourceVersion {\n ordinal: Ordinal(Some(1000)), // Original file timestamp\n kind: SourceVersionKind::CurrentLogic,\n };\n\n let after_github_checkout = SourceVersion {\n ordinal: Ordinal(Some(2000)), // New timestamp after checkout\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Step 1: Ordinal check should NOT skip (newer ordinal means potential processing needed)\n assert!(\n !original_processing.should_skip(&after_github_checkout, None),\n \"GitHub Actions: newer ordinal should not be skipped at ordinal level\"\n );\n\n // Step 2: Content hash optimization should trigger when content is same\n // This is tested in the integration level - the optimization path should:\n // - Compare content hashes\n // - If same: update only tracking info (process_ordinal, process_time)\n // - Skip expensive evaluation and target storage updates\n\n // Step 3: After optimization, tracking shows the new ordinal\n let after_optimization = SourceVersion {\n ordinal: Ordinal(Some(2000)), // Updated to new ordinal\n kind: SourceVersionKind::CurrentLogic,\n };\n\n // Future requests with same ordinal should be skipped\n assert!(\n after_optimization.should_skip(&after_github_checkout, None),\n \"After optimization, same ordinal should be skipped\"\n );\n }\n}\n"], ["/cocoindex/src/builder/analyzer.rs", "use crate::builder::exec_ctx::AnalyzedSetupState;\nuse crate::ops::get_executor_factory;\nuse crate::prelude::*;\n\nuse super::plan::*;\nuse crate::lib_context::get_auth_registry;\nuse crate::utils::fingerprint::Fingerprinter;\nuse crate::{\n base::{schema::*, spec::*},\n ops::interface::*,\n};\nuse futures::future::{BoxFuture, try_join3};\nuse futures::{FutureExt, future::try_join_all};\n\n#[derive(Debug)]\npub(super) enum ValueTypeBuilder {\n Basic(BasicValueType),\n Struct(StructSchemaBuilder),\n Table(TableSchemaBuilder),\n}\n\nimpl TryFrom<&ValueType> for 
ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_from(value_type: &ValueType) -> Result {\n match value_type {\n ValueType::Basic(basic_type) => Ok(ValueTypeBuilder::Basic(basic_type.clone())),\n ValueType::Struct(struct_type) => Ok(ValueTypeBuilder::Struct(struct_type.try_into()?)),\n ValueType::Table(table_type) => Ok(ValueTypeBuilder::Table(table_type.try_into()?)),\n }\n }\n}\n\nimpl TryInto for &ValueTypeBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n match self {\n ValueTypeBuilder::Basic(basic_type) => Ok(ValueType::Basic(basic_type.clone())),\n ValueTypeBuilder::Struct(struct_type) => Ok(ValueType::Struct(struct_type.try_into()?)),\n ValueTypeBuilder::Table(table_type) => Ok(ValueType::Table(table_type.try_into()?)),\n }\n }\n}\n\n#[derive(Default, Debug)]\npub(super) struct StructSchemaBuilder {\n fields: Vec>,\n field_name_idx: HashMap,\n description: Option>,\n}\n\nimpl StructSchemaBuilder {\n fn add_field(&mut self, field: FieldSchema) -> Result {\n let field_idx = self.fields.len() as u32;\n match self.field_name_idx.entry(field.name.clone()) {\n std::collections::hash_map::Entry::Occupied(_) => {\n bail!(\"Field name already exists: {}\", field.name);\n }\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(field_idx);\n }\n }\n self.fields.push(field);\n Ok(field_idx)\n }\n\n pub fn find_field(&self, field_name: &'_ str) -> Option<(u32, &FieldSchema)> {\n self.field_name_idx\n .get(field_name)\n .map(|&field_idx| (field_idx, &self.fields[field_idx as usize]))\n }\n}\n\nimpl TryFrom<&StructSchema> for StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &StructSchema) -> Result {\n let mut result = StructSchemaBuilder {\n fields: Vec::with_capacity(schema.fields.len()),\n field_name_idx: HashMap::with_capacity(schema.fields.len()),\n description: schema.description.clone(),\n };\n for field in schema.fields.iter() {\n 
result.add_field(FieldSchema::::from_alternative(field)?)?;\n }\n Ok(result)\n }\n}\n\nimpl TryInto for &StructSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n Ok(StructSchema {\n fields: Arc::new(\n self.fields\n .iter()\n .map(FieldSchema::::from_alternative)\n .collect::>>()?,\n ),\n description: self.description.clone(),\n })\n }\n}\n\n#[derive(Debug)]\npub(super) struct TableSchemaBuilder {\n pub kind: TableKind,\n pub sub_scope: Arc>,\n}\n\nimpl TryFrom<&TableSchema> for TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_from(schema: &TableSchema) -> Result {\n Ok(Self {\n kind: schema.kind,\n sub_scope: Arc::new(Mutex::new(DataScopeBuilder {\n data: (&schema.row).try_into()?,\n })),\n })\n }\n}\n\nimpl TryInto for &TableSchemaBuilder {\n type Error = anyhow::Error;\n\n fn try_into(self) -> Result {\n let sub_scope = self.sub_scope.lock().unwrap();\n let row = (&sub_scope.data).try_into()?;\n Ok(TableSchema {\n kind: self.kind,\n row,\n })\n }\n}\n\nfn try_make_common_value_type(\n value_type1: &EnrichedValueType,\n value_type2: &EnrichedValueType,\n) -> Result {\n let typ = match (&value_type1.typ, &value_type2.typ) {\n (ValueType::Basic(basic_type1), ValueType::Basic(basic_type2)) => {\n if basic_type1 != basic_type2 {\n api_bail!(\"Value types are not compatible: {basic_type1} vs {basic_type2}\");\n }\n ValueType::Basic(basic_type1.clone())\n }\n (ValueType::Struct(struct_type1), ValueType::Struct(struct_type2)) => {\n let common_schema = try_merge_struct_schemas(struct_type1, struct_type2)?;\n ValueType::Struct(common_schema)\n }\n (ValueType::Table(table_type1), ValueType::Table(table_type2)) => {\n if table_type1.kind != table_type2.kind {\n api_bail!(\n \"Collection types are not compatible: {} vs {}\",\n table_type1,\n table_type2\n );\n }\n let row = try_merge_struct_schemas(&table_type1.row, &table_type2.row)?;\n ValueType::Table(TableSchema {\n kind: table_type1.kind,\n row,\n })\n }\n (t1 @ 
(ValueType::Basic(_) | ValueType::Struct(_) | ValueType::Table(_)), t2) => {\n api_bail!(\"Unmatched types:\\n {t1}\\n {t2}\\n\",)\n }\n };\n let common_attrs: Vec<_> = value_type1\n .attrs\n .iter()\n .filter_map(|(k, v)| {\n if value_type2.attrs.get(k) == Some(v) {\n Some((k, v))\n } else {\n None\n }\n })\n .collect();\n let attrs = if common_attrs.len() == value_type1.attrs.len() {\n value_type1.attrs.clone()\n } else {\n Arc::new(\n common_attrs\n .into_iter()\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect(),\n )\n };\n\n Ok(EnrichedValueType {\n typ,\n nullable: value_type1.nullable || value_type2.nullable,\n attrs,\n })\n}\n\nfn try_merge_fields_schemas(\n schema1: &[FieldSchema],\n schema2: &[FieldSchema],\n) -> Result> {\n if schema1.len() != schema2.len() {\n api_bail!(\n \"Fields are not compatible as they have different fields count:\\n ({})\\n ({})\\n\",\n schema1\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \"),\n schema2\n .iter()\n .map(|f| f.to_string())\n .collect::>()\n .join(\", \")\n );\n }\n let mut result_fields = Vec::with_capacity(schema1.len());\n for (field1, field2) in schema1.iter().zip(schema2.iter()) {\n if field1.name != field2.name {\n api_bail!(\n \"Structs are not compatible as they have incompatible field names `{}` vs `{}`\",\n field1.name,\n field2.name\n );\n }\n result_fields.push(FieldSchema {\n name: field1.name.clone(),\n value_type: try_make_common_value_type(&field1.value_type, &field2.value_type)?,\n });\n }\n Ok(result_fields)\n}\n\nfn try_merge_struct_schemas(\n schema1: &StructSchema,\n schema2: &StructSchema,\n) -> Result {\n let fields = try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(StructSchema {\n fields: Arc::new(fields),\n description: schema1\n .description\n .clone()\n .or_else(|| schema2.description.clone()),\n })\n}\n\nfn try_merge_collector_schemas(\n schema1: &CollectorSchema,\n schema2: &CollectorSchema,\n) -> Result {\n let fields = 
try_merge_fields_schemas(&schema1.fields, &schema2.fields)?;\n Ok(CollectorSchema {\n fields,\n auto_uuid_field_idx: if schema1.auto_uuid_field_idx == schema2.auto_uuid_field_idx {\n schema1.auto_uuid_field_idx\n } else {\n None\n },\n })\n}\n\n#[derive(Debug)]\npub(super) struct CollectorBuilder {\n pub schema: Arc,\n pub is_used: bool,\n}\n\nimpl CollectorBuilder {\n pub fn new(schema: Arc) -> Self {\n Self {\n schema,\n is_used: false,\n }\n }\n\n pub fn merge_schema(&mut self, schema: &CollectorSchema) -> Result<()> {\n if self.is_used {\n api_bail!(\"Collector is already used\");\n }\n let existing_schema = Arc::make_mut(&mut self.schema);\n *existing_schema = try_merge_collector_schemas(existing_schema, schema)?;\n Ok(())\n }\n\n pub fn use_schema(&mut self) -> Arc {\n self.is_used = true;\n self.schema.clone()\n }\n}\n\n#[derive(Debug)]\npub(super) struct DataScopeBuilder {\n pub data: StructSchemaBuilder,\n}\n\nimpl DataScopeBuilder {\n pub fn new() -> Self {\n Self {\n data: Default::default(),\n }\n }\n\n pub fn last_field(&self) -> Option<&FieldSchema> {\n self.data.fields.last()\n }\n\n pub fn add_field(\n &mut self,\n name: FieldName,\n value_type: &EnrichedValueType,\n ) -> Result {\n let field_index = self.data.add_field(FieldSchema {\n name,\n value_type: EnrichedValueType::from_alternative(value_type)?,\n })?;\n Ok(AnalyzedOpOutput {\n field_idx: field_index,\n })\n }\n\n pub fn analyze_field_path<'a>(\n &'a self,\n field_path: &'_ FieldPath,\n ) -> Result<(\n AnalyzedLocalFieldReference,\n &'a EnrichedValueType,\n )> {\n let mut indices = Vec::with_capacity(field_path.len());\n let mut struct_schema = &self.data;\n\n let mut i = 0;\n let value_type = loop {\n let field_name = &field_path[i];\n let (field_idx, field) = struct_schema.find_field(field_name).ok_or_else(|| {\n api_error!(\"Field {} not found\", field_path[0..(i + 1)].join(\".\"))\n })?;\n indices.push(field_idx);\n if i + 1 >= field_path.len() {\n break &field.value_type;\n }\n i += 
1;\n\n struct_schema = match &field.value_type.typ {\n ValueTypeBuilder::Struct(struct_type) => struct_type,\n _ => {\n api_bail!(\"Field {} is not a struct\", field_path[0..(i + 1)].join(\".\"));\n }\n };\n };\n Ok((\n AnalyzedLocalFieldReference {\n fields_idx: indices,\n },\n value_type,\n ))\n }\n}\n\npub(super) struct AnalyzerContext {\n pub lib_ctx: Arc,\n pub flow_ctx: Arc,\n}\n\n#[derive(Debug, Default)]\npub(super) struct OpScopeStates {\n pub op_output_types: HashMap,\n pub collectors: IndexMap,\n pub sub_scopes: HashMap>,\n}\n\nimpl OpScopeStates {\n pub fn add_collector(\n &mut self,\n collector_name: FieldName,\n schema: CollectorSchema,\n ) -> Result {\n let existing_len = self.collectors.len();\n let idx = match self.collectors.entry(collector_name) {\n indexmap::map::Entry::Occupied(mut entry) => {\n entry.get_mut().merge_schema(&schema)?;\n entry.index()\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(CollectorBuilder::new(Arc::new(schema)));\n existing_len\n }\n };\n Ok(AnalyzedLocalCollectorReference {\n collector_idx: idx as u32,\n })\n }\n\n pub fn consume_collector(\n &mut self,\n collector_name: &FieldName,\n ) -> Result<(AnalyzedLocalCollectorReference, Arc)> {\n let (collector_idx, _, collector) = self\n .collectors\n .get_full_mut(collector_name)\n .ok_or_else(|| api_error!(\"Collector not found: {}\", collector_name))?;\n Ok((\n AnalyzedLocalCollectorReference {\n collector_idx: collector_idx as u32,\n },\n collector.use_schema(),\n ))\n }\n\n fn build_op_scope_schema(&self) -> OpScopeSchema {\n OpScopeSchema {\n op_output_types: self\n .op_output_types\n .iter()\n .map(|(name, value_type)| (name.clone(), value_type.without_attrs()))\n .collect(),\n collectors: self\n .collectors\n .iter()\n .map(|(name, schema)| NamedSpec {\n name: name.clone(),\n spec: schema.schema.clone(),\n })\n .collect(),\n op_scopes: self.sub_scopes.clone(),\n }\n }\n}\n\n#[derive(Debug)]\npub struct OpScope {\n pub name: String,\n pub parent: 
Option<(Arc, spec::FieldPath)>,\n pub(super) data: Arc>,\n pub(super) states: Mutex,\n}\n\nstruct Iter<'a>(Option<&'a OpScope>);\n\nimpl<'a> Iterator for Iter<'a> {\n type Item = &'a OpScope;\n\n fn next(&mut self) -> Option {\n match self.0 {\n Some(scope) => {\n self.0 = scope.parent.as_ref().map(|(parent, _)| parent.as_ref());\n Some(scope)\n }\n None => None,\n }\n }\n}\n\nimpl OpScope {\n pub(super) fn new(\n name: String,\n parent: Option<(Arc, spec::FieldPath)>,\n data: Arc>,\n ) -> Arc {\n Arc::new(Self {\n name,\n parent,\n data,\n states: Mutex::default(),\n })\n }\n\n fn add_op_output(\n &self,\n name: FieldName,\n value_type: EnrichedValueType,\n ) -> Result {\n let op_output = self\n .data\n .lock()\n .unwrap()\n .add_field(name.clone(), &value_type)?;\n self.states\n .lock()\n .unwrap()\n .op_output_types\n .insert(name, value_type);\n Ok(op_output)\n }\n\n pub fn ancestors(&self) -> impl Iterator {\n Iter(Some(self))\n }\n\n pub fn is_op_scope_descendant(&self, other: &Self) -> bool {\n if self == other {\n return true;\n }\n match &self.parent {\n Some((parent, _)) => parent.is_op_scope_descendant(other),\n None => false,\n }\n }\n\n pub(super) fn new_foreach_op_scope(\n self: &Arc,\n scope_name: String,\n field_path: &FieldPath,\n ) -> Result<(AnalyzedLocalFieldReference, Arc)> {\n let (local_field_ref, sub_data_scope) = {\n let data_scope = self.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(field_path)?;\n let sub_data_scope = match &value_type.typ {\n ValueTypeBuilder::Table(table_type) => table_type.sub_scope.clone(),\n _ => api_bail!(\"ForEach only works on collection, field {field_path} is not\"),\n };\n (local_field_ref, sub_data_scope)\n };\n let sub_op_scope = OpScope::new(\n scope_name,\n Some((self.clone(), field_path.clone())),\n sub_data_scope,\n );\n Ok((local_field_ref, sub_op_scope))\n }\n}\n\nimpl std::fmt::Display for OpScope {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result {\n if let Some((scope, field_path)) = &self.parent {\n write!(f, \"{} [{} AS {}]\", scope, field_path, self.name)?;\n } else {\n write!(f, \"[{}]\", self.name)?;\n }\n Ok(())\n }\n}\n\nimpl PartialEq for OpScope {\n fn eq(&self, other: &Self) -> bool {\n std::ptr::eq(self, other)\n }\n}\nimpl Eq for OpScope {}\n\nfn find_scope<'a>(scope_name: &ScopeName, op_scope: &'a OpScope) -> Result<(u32, &'a OpScope)> {\n let (up_level, scope) = op_scope\n .ancestors()\n .enumerate()\n .find(|(_, s)| &s.name == scope_name)\n .ok_or_else(|| api_error!(\"Scope not found: {}\", scope_name))?;\n Ok((up_level as u32, scope))\n}\n\nfn analyze_struct_mapping(\n mapping: &StructMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedStructMapping, Vec)> {\n let mut field_mappings = Vec::with_capacity(mapping.fields.len());\n let mut field_schemas = Vec::with_capacity(mapping.fields.len());\n for field in mapping.fields.iter() {\n let (field_mapping, value_type) = analyze_value_mapping(&field.spec, op_scope)?;\n field_mappings.push(field_mapping);\n field_schemas.push(FieldSchema {\n name: field.name.clone(),\n value_type,\n });\n }\n Ok((\n AnalyzedStructMapping {\n fields: field_mappings,\n },\n field_schemas,\n ))\n}\n\nfn analyze_value_mapping(\n value_mapping: &ValueMapping,\n op_scope: &OpScope,\n) -> Result<(AnalyzedValueMapping, EnrichedValueType)> {\n let result = match value_mapping {\n ValueMapping::Constant(v) => {\n let value = value::Value::from_json(v.value.clone(), &v.schema.typ)?;\n (AnalyzedValueMapping::Constant { value }, v.schema.clone())\n }\n\n ValueMapping::Field(v) => {\n let (scope_up_level, op_scope) = match &v.scope {\n Some(scope_name) => find_scope(scope_name, op_scope)?,\n None => (0, op_scope),\n };\n let data_scope = op_scope.data.lock().unwrap();\n let (local_field_ref, value_type) = data_scope.analyze_field_path(&v.field_path)?;\n (\n AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: local_field_ref,\n scope_up_level,\n 
}),\n EnrichedValueType::from_alternative(value_type)?,\n )\n }\n\n ValueMapping::Struct(v) => {\n let (struct_mapping, field_schemas) = analyze_struct_mapping(v, op_scope)?;\n (\n AnalyzedValueMapping::Struct(struct_mapping),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(field_schemas),\n description: None,\n }),\n nullable: false,\n attrs: Default::default(),\n },\n )\n }\n };\n Ok(result)\n}\n\nfn analyze_input_fields(\n arg_bindings: &[OpArgBinding],\n op_scope: &OpScope,\n) -> Result> {\n let mut input_field_schemas = Vec::with_capacity(arg_bindings.len());\n for arg_binding in arg_bindings.iter() {\n let (analyzed_value, value_type) = analyze_value_mapping(&arg_binding.value, op_scope)?;\n input_field_schemas.push(OpArgSchema {\n name: arg_binding.arg_name.clone(),\n value_type,\n analyzed_value: analyzed_value.clone(),\n });\n }\n Ok(input_field_schemas)\n}\n\nfn add_collector(\n scope_name: &ScopeName,\n collector_name: FieldName,\n schema: CollectorSchema,\n op_scope: &OpScope,\n) -> Result {\n let (scope_up_level, scope) = find_scope(scope_name, op_scope)?;\n let local_ref = scope\n .states\n .lock()\n .unwrap()\n .add_collector(collector_name, schema)?;\n Ok(AnalyzedCollectorReference {\n local: local_ref,\n scope_up_level,\n })\n}\n\nstruct ExportDataFieldsInfo {\n local_collector_ref: AnalyzedLocalCollectorReference,\n primary_key_def: AnalyzedPrimaryKeyDef,\n primary_key_type: ValueType,\n value_fields_idx: Vec,\n value_stable: bool,\n}\n\nimpl AnalyzerContext {\n pub(super) async fn analyze_import_op(\n &self,\n op_scope: &Arc,\n import_op: NamedSpec,\n ) -> Result> + Send + use<>> {\n let source_factory = match get_executor_factory(&import_op.spec.source.kind)? 
{\n ExecutorFactory::Source(source_executor) => source_executor,\n _ => {\n return Err(anyhow::anyhow!(\n \"`{}` is not a source op\",\n import_op.spec.source.kind\n ));\n }\n };\n let (output_type, executor) = source_factory\n .build(\n serde_json::Value::Object(import_op.spec.source.spec),\n self.flow_ctx.clone(),\n )\n .await?;\n\n let op_name = import_op.name.clone();\n let primary_key_type = output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key: {op_name}\"))?\n .typ\n .clone();\n let output = op_scope.add_op_output(import_op.name, output_type)?;\n\n let concur_control_options = import_op\n .spec\n .execution_options\n .get_concur_control_options();\n let global_concurrency_controller = self.lib_ctx.global_concurrency_controller.clone();\n let result_fut = async move {\n trace!(\"Start building executor for source op `{op_name}`\");\n let executor = executor.await?;\n trace!(\"Finished building executor for source op `{op_name}`\");\n Ok(AnalyzedImportOp {\n executor,\n output,\n primary_key_type,\n name: op_name,\n refresh_options: import_op.spec.refresh_options,\n concurrency_controller: concur_control::CombinedConcurrencyController::new(\n &concur_control_options,\n global_concurrency_controller,\n ),\n })\n };\n Ok(result_fut)\n }\n\n pub(super) async fn analyze_reactive_op(\n &self,\n op_scope: &Arc,\n reactive_op: &NamedSpec,\n ) -> Result>> {\n let result_fut = match &reactive_op.spec {\n ReactiveOpSpec::Transform(op) => {\n let input_field_schemas =\n analyze_input_fields(&op.inputs, op_scope).with_context(|| {\n format!(\n \"Failed to analyze inputs for transform op: {}\",\n reactive_op.name\n )\n })?;\n let spec = serde_json::Value::Object(op.op.spec.clone());\n\n match get_executor_factory(&op.op.kind)? 
{\n ExecutorFactory::SimpleFunction(fn_executor) => {\n let input_value_mappings = input_field_schemas\n .iter()\n .map(|field| field.analyzed_value.clone())\n .collect();\n let (output_enriched_type, executor) = fn_executor\n .build(spec, input_field_schemas, self.flow_ctx.clone())\n .await?;\n let logic_fingerprinter = Fingerprinter::default()\n .with(&op.op)?\n .with(&output_enriched_type.without_attrs())?;\n let output_type = output_enriched_type.typ.clone();\n let output = op_scope\n .add_op_output(reactive_op.name.clone(), output_enriched_type)?;\n let op_name = reactive_op.name.clone();\n async move {\n trace!(\"Start building executor for transform op `{op_name}`\");\n let executor = executor.await.with_context(|| {\n format!(\"Failed to build executor for transform op: {op_name}\")\n })?;\n let enable_cache = executor.enable_cache();\n let behavior_version = executor.behavior_version();\n trace!(\"Finished building executor for transform op `{op_name}`, enable cache: {enable_cache}, behavior version: {behavior_version:?}\");\n let function_exec_info = AnalyzedFunctionExecInfo {\n enable_cache,\n behavior_version,\n fingerprinter: logic_fingerprinter\n .with(&behavior_version)?,\n output_type\n };\n if function_exec_info.enable_cache\n && function_exec_info.behavior_version.is_none()\n {\n api_bail!(\n \"When caching is enabled, behavior version must be specified for transform op: {op_name}\"\n );\n }\n Ok(AnalyzedReactiveOp::Transform(AnalyzedTransformOp {\n name: op_name,\n inputs: input_value_mappings,\n function_exec_info,\n executor,\n output,\n }))\n }\n .boxed()\n }\n _ => api_bail!(\"`{}` is not a function op\", op.op.kind),\n }\n }\n\n ReactiveOpSpec::ForEach(foreach_op) => {\n let (local_field_ref, sub_op_scope) = op_scope.new_foreach_op_scope(\n foreach_op.op_scope.name.clone(),\n &foreach_op.field_path,\n )?;\n let analyzed_op_scope_fut = {\n let analyzed_op_scope_fut = self\n .analyze_op_scope(&sub_op_scope, &foreach_op.op_scope.ops)\n 
.boxed_local()\n .await?;\n let sub_op_scope_schema =\n sub_op_scope.states.lock().unwrap().build_op_scope_schema();\n op_scope.states.lock().unwrap().sub_scopes.insert(\n foreach_op.op_scope.name.clone(),\n Arc::new(sub_op_scope_schema),\n );\n analyzed_op_scope_fut\n };\n let op_name = reactive_op.name.clone();\n\n let concur_control_options =\n foreach_op.execution_options.get_concur_control_options();\n async move {\n Ok(AnalyzedReactiveOp::ForEach(AnalyzedForEachOp {\n local_field_ref,\n op_scope: analyzed_op_scope_fut\n .await\n .with_context(|| format!(\"Analyzing foreach op: {op_name}\"))?,\n name: op_name,\n concurrency_controller: concur_control::ConcurrencyController::new(\n &concur_control_options,\n ),\n }))\n }\n .boxed()\n }\n\n ReactiveOpSpec::Collect(op) => {\n let (struct_mapping, fields_schema) = analyze_struct_mapping(&op.input, op_scope)?;\n let has_auto_uuid_field = op.auto_uuid_field.is_some();\n let fingerprinter = Fingerprinter::default().with(&fields_schema)?;\n let collect_op = AnalyzedReactiveOp::Collect(AnalyzedCollectOp {\n name: reactive_op.name.clone(),\n has_auto_uuid_field,\n input: struct_mapping,\n collector_ref: add_collector(\n &op.scope_name,\n op.collector_name.clone(),\n CollectorSchema::from_fields(fields_schema, op.auto_uuid_field.clone()),\n op_scope,\n )?,\n fingerprinter,\n });\n async move { Ok(collect_op) }.boxed()\n }\n };\n Ok(result_fut)\n }\n\n #[allow(clippy::too_many_arguments)]\n async fn analyze_export_op_group(\n &self,\n target_kind: &str,\n op_scope: &Arc,\n flow_inst: &FlowInstanceSpec,\n export_op_group: &AnalyzedExportTargetOpGroup,\n declarations: Vec,\n targets_analyzed_ss: &mut [Option],\n declarations_analyzed_ss: &mut Vec,\n ) -> Result> + Send + use<>>> {\n let mut collection_specs = Vec::::new();\n let mut data_fields_infos = Vec::::new();\n for idx in export_op_group.op_idx.iter() {\n let export_op = &flow_inst.export_ops[*idx];\n let (local_collector_ref, collector_schema) = op_scope\n .states\n 
.lock()\n .unwrap()\n .consume_collector(&export_op.spec.collector_name)?;\n let (key_fields_schema, value_fields_schema, data_collection_info) =\n match &export_op.spec.index_options.primary_key_fields {\n Some(fields) => {\n let pk_fields_idx = fields\n .iter()\n .map(|f| {\n collector_schema\n .fields\n .iter()\n .position(|field| &field.name == f)\n .ok_or_else(|| anyhow!(\"field not found: {}\", f))\n })\n .collect::>>()?;\n\n let key_fields_schema = pk_fields_idx\n .iter()\n .map(|idx| collector_schema.fields[*idx].clone())\n .collect::>();\n let primary_key_type = if pk_fields_idx.len() == 1 {\n key_fields_schema[0].value_type.typ.clone()\n } else {\n ValueType::Struct(StructSchema {\n fields: Arc::from(key_fields_schema.clone()),\n description: None,\n })\n };\n let mut value_fields_schema: Vec = vec![];\n let mut value_fields_idx = vec![];\n for (idx, field) in collector_schema.fields.iter().enumerate() {\n if !pk_fields_idx.contains(&idx) {\n value_fields_schema.push(field.clone());\n value_fields_idx.push(idx as u32);\n }\n }\n let value_stable = collector_schema\n .auto_uuid_field_idx\n .as_ref()\n .map(|uuid_idx| pk_fields_idx.contains(uuid_idx))\n .unwrap_or(false);\n (\n key_fields_schema,\n value_fields_schema,\n ExportDataFieldsInfo {\n local_collector_ref,\n primary_key_def: AnalyzedPrimaryKeyDef::Fields(pk_fields_idx),\n primary_key_type,\n value_fields_idx,\n value_stable,\n },\n )\n }\n None => {\n // TODO: Support auto-generate primary key\n api_bail!(\"Primary key fields must be specified\")\n }\n };\n collection_specs.push(interface::ExportDataCollectionSpec {\n name: export_op.name.clone(),\n spec: serde_json::Value::Object(export_op.spec.target.spec.clone()),\n key_fields_schema,\n value_fields_schema,\n index_options: export_op.spec.index_options.clone(),\n });\n data_fields_infos.push(data_collection_info);\n }\n let (data_collections_output, declarations_output) = export_op_group\n .target_factory\n .clone()\n .build(collection_specs, 
declarations, self.flow_ctx.clone())\n .await?;\n let analyzed_export_ops = export_op_group\n .op_idx\n .iter()\n .zip(data_collections_output.into_iter())\n .zip(data_fields_infos.into_iter())\n .map(|((idx, data_coll_output), data_fields_info)| {\n let export_op = &flow_inst.export_ops[*idx];\n let op_name = export_op.name.clone();\n let export_target_factory = export_op_group.target_factory.clone();\n\n let export_op_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key: data_coll_output.setup_key,\n desired_setup_state: data_coll_output.desired_setup_state,\n setup_by_user: export_op.spec.setup_by_user,\n };\n targets_analyzed_ss[*idx] = Some(export_op_ss);\n\n Ok(async move {\n trace!(\"Start building executor for export op `{op_name}`\");\n let export_context = data_coll_output\n .export_context\n .await\n .with_context(|| format!(\"Analyzing export op: {op_name}\"))?;\n trace!(\"Finished building executor for export op `{op_name}`\");\n Ok(AnalyzedExportOp {\n name: op_name,\n input: data_fields_info.local_collector_ref,\n export_target_factory,\n export_context,\n primary_key_def: data_fields_info.primary_key_def,\n primary_key_type: data_fields_info.primary_key_type,\n value_fields: data_fields_info.value_fields_idx,\n value_stable: data_fields_info.value_stable,\n })\n })\n })\n .collect::>>()?;\n for (setup_key, desired_setup_state) in declarations_output {\n let decl_ss = exec_ctx::AnalyzedTargetSetupState {\n target_kind: target_kind.to_string(),\n setup_key,\n desired_setup_state,\n setup_by_user: false,\n };\n declarations_analyzed_ss.push(decl_ss);\n }\n Ok(analyzed_export_ops)\n }\n\n async fn analyze_op_scope(\n &self,\n op_scope: &Arc,\n reactive_ops: &[NamedSpec],\n ) -> Result> + Send + use<>> {\n let mut op_futs = Vec::with_capacity(reactive_ops.len());\n for reactive_op in reactive_ops.iter() {\n op_futs.push(self.analyze_reactive_op(op_scope, reactive_op).await?);\n }\n let collector_len = 
op_scope.states.lock().unwrap().collectors.len();\n let result_fut = async move {\n Ok(AnalyzedOpScope {\n reactive_ops: try_join_all(op_futs).await?,\n collector_len,\n })\n };\n Ok(result_fut)\n }\n}\n\npub fn build_flow_instance_context(\n flow_inst_name: &str,\n py_exec_ctx: Option,\n) -> Arc {\n Arc::new(FlowInstanceContext {\n flow_instance_name: flow_inst_name.to_string(),\n auth_registry: get_auth_registry().clone(),\n py_exec_ctx: py_exec_ctx.map(Arc::new),\n })\n}\n\nfn build_flow_schema(root_op_scope: &OpScope) -> Result {\n let schema = (&root_op_scope.data.lock().unwrap().data).try_into()?;\n let root_op_scope_schema = root_op_scope.states.lock().unwrap().build_op_scope_schema();\n Ok(FlowSchema {\n schema,\n root_op_scope: root_op_scope_schema,\n })\n}\n\npub async fn analyze_flow(\n flow_inst: &FlowInstanceSpec,\n flow_ctx: Arc,\n) -> Result<(\n FlowSchema,\n AnalyzedSetupState,\n impl Future> + Send + use<>,\n)> {\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let root_data_scope = Arc::new(Mutex::new(DataScopeBuilder::new()));\n let root_op_scope = OpScope::new(ROOT_SCOPE_NAME.to_string(), None, root_data_scope);\n let mut import_ops_futs = Vec::with_capacity(flow_inst.import_ops.len());\n for import_op in flow_inst.import_ops.iter() {\n import_ops_futs.push(\n analyzer_ctx\n .analyze_import_op(&root_op_scope, import_op.clone())\n .await?,\n );\n }\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n\n #[derive(Default)]\n struct TargetOpGroup {\n export_op_ids: Vec,\n declarations: Vec,\n }\n let mut target_op_group = IndexMap::::new();\n for (idx, export_op) in flow_inst.export_ops.iter().enumerate() {\n target_op_group\n .entry(export_op.spec.target.kind.clone())\n .or_default()\n .export_op_ids\n .push(idx);\n }\n for declaration in flow_inst.declarations.iter() {\n target_op_group\n .entry(declaration.kind.clone())\n .or_default()\n .declarations\n 
.push(serde_json::Value::Object(declaration.spec.clone()));\n }\n\n let mut export_ops_futs = vec![];\n let mut analyzed_target_op_groups = vec![];\n\n let mut targets_analyzed_ss = Vec::with_capacity(flow_inst.export_ops.len());\n targets_analyzed_ss.resize_with(flow_inst.export_ops.len(), || None);\n\n let mut declarations_analyzed_ss = Vec::with_capacity(flow_inst.declarations.len());\n\n for (target_kind, op_ids) in target_op_group.into_iter() {\n let target_factory = match get_executor_factory(&target_kind)? {\n ExecutorFactory::ExportTarget(export_executor) => export_executor,\n _ => api_bail!(\"`{}` is not a export target op\", target_kind),\n };\n let analyzed_target_op_group = AnalyzedExportTargetOpGroup {\n target_factory,\n op_idx: op_ids.export_op_ids,\n };\n export_ops_futs.extend(\n analyzer_ctx\n .analyze_export_op_group(\n target_kind.as_str(),\n &root_op_scope,\n flow_inst,\n &analyzed_target_op_group,\n op_ids.declarations,\n &mut targets_analyzed_ss,\n &mut declarations_analyzed_ss,\n )\n .await?,\n );\n analyzed_target_op_groups.push(analyzed_target_op_group);\n }\n\n let flow_schema = build_flow_schema(&root_op_scope)?;\n let analyzed_ss = exec_ctx::AnalyzedSetupState {\n targets: targets_analyzed_ss\n .into_iter()\n .enumerate()\n .map(|(idx, v)| v.ok_or_else(|| anyhow!(\"target op `{}` not found\", idx)))\n .collect::>>()?,\n declarations: declarations_analyzed_ss,\n };\n\n let logic_fingerprint = Fingerprinter::default()\n .with(&flow_inst)?\n .with(&flow_schema.schema)?\n .into_fingerprint();\n let plan_fut = async move {\n let (import_ops, op_scope, export_ops) = try_join3(\n try_join_all(import_ops_futs),\n op_scope_fut,\n try_join_all(export_ops_futs),\n )\n .await?;\n\n Ok(ExecutionPlan {\n logic_fingerprint,\n import_ops,\n op_scope,\n export_ops,\n export_op_groups: analyzed_target_op_groups,\n })\n };\n\n Ok((flow_schema, analyzed_ss, plan_fut))\n}\n\npub async fn analyze_transient_flow<'a>(\n flow_inst: &TransientFlowSpec,\n 
flow_ctx: Arc,\n) -> Result<(\n EnrichedValueType,\n FlowSchema,\n impl Future> + Send + 'a,\n)> {\n let mut root_data_scope = DataScopeBuilder::new();\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: get_lib_context()?,\n flow_ctx,\n };\n let mut input_fields = vec![];\n for field in flow_inst.input_fields.iter() {\n let analyzed_field = root_data_scope.add_field(field.name.clone(), &field.value_type)?;\n input_fields.push(analyzed_field);\n }\n let root_op_scope = OpScope::new(\n ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(root_data_scope)),\n );\n let op_scope_fut = analyzer_ctx\n .analyze_op_scope(&root_op_scope, &flow_inst.reactive_ops)\n .await?;\n let (output_value, output_type) =\n analyze_value_mapping(&flow_inst.output_value, &root_op_scope)?;\n let data_schema = build_flow_schema(&root_op_scope)?;\n let plan_fut = async move {\n let op_scope = op_scope_fut.await?;\n Ok(TransientExecutionPlan {\n input_fields,\n op_scope,\n output_value,\n })\n };\n Ok((output_type, data_schema, plan_fut))\n}\n"], ["/cocoindex/src/py/mod.rs", "use crate::execution::evaluator::evaluate_transient_flow;\nuse crate::prelude::*;\n\nuse crate::base::schema::{FieldSchema, ValueType};\nuse crate::base::spec::{NamedSpec, OutputMode, ReactiveOpSpec, SpecFormatter};\nuse crate::lib_context::{clear_lib_context, get_auth_registry, init_lib_context};\nuse crate::ops::py_factory::{PyExportTargetFactory, PyOpArgSchema};\nuse crate::ops::{interface::ExecutorFactory, py_factory::PyFunctionFactory, register_factory};\nuse crate::server::{self, ServerSettings};\nuse crate::settings::Settings;\nuse crate::setup::{self};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::fmt::Write;\nuse std::sync::Arc;\n\nmod convert;\npub(crate) use convert::*;\n\npub struct PythonExecutionContext {\n pub event_loop: Py,\n}\n\nimpl PythonExecutionContext {\n pub fn new(_py: Python<'_>, event_loop: Py) -> 
Self {\n Self { event_loop }\n }\n}\n\npub trait ToResultWithPyTrace {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result;\n}\n\nimpl ToResultWithPyTrace for Result {\n fn to_result_with_py_trace(self, py: Python<'_>) -> anyhow::Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => {\n let mut err_str = format!(\"Error calling Python function: {err}\");\n if let Some(tb) = err.traceback(py) {\n write!(&mut err_str, \"\\n{}\", tb.format()?)?;\n }\n Err(anyhow::anyhow!(err_str))\n }\n }\n }\n}\npub trait IntoPyResult {\n fn into_py_result(self) -> PyResult;\n}\n\nimpl IntoPyResult for Result {\n fn into_py_result(self) -> PyResult {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(PyException::new_err(format!(\"{err:?}\"))),\n }\n }\n}\n\n#[pyfunction]\nfn init(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n init_lib_context(settings.into_inner())?;\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn start_server(py: Python<'_>, settings: Pythonized) -> PyResult<()> {\n py.allow_threads(|| -> anyhow::Result<()> {\n let server = get_runtime().block_on(server::init_server(\n get_lib_context()?,\n settings.into_inner(),\n ))?;\n get_runtime().spawn(server);\n Ok(())\n })\n .into_py_result()\n}\n\n#[pyfunction]\nfn stop(py: Python<'_>) -> PyResult<()> {\n py.allow_threads(clear_lib_context);\n Ok(())\n}\n\n#[pyfunction]\nfn register_function_factory(name: String, py_function_factory: Py) -> PyResult<()> {\n let factory = PyFunctionFactory {\n py_function_factory,\n };\n register_factory(name, ExecutorFactory::SimpleFunction(Arc::new(factory))).into_py_result()\n}\n\n#[pyfunction]\nfn register_target_connector(name: String, py_target_connector: Py) -> PyResult<()> {\n let factory = PyExportTargetFactory {\n py_target_connector,\n };\n register_factory(name, ExecutorFactory::ExportTarget(Arc::new(factory))).into_py_result()\n}\n\n#[pyclass]\npub struct IndexUpdateInfo(pub 
execution::stats::IndexUpdateInfo);\n\n#[pymethods]\nimpl IndexUpdateInfo {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.0)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\n#[pyclass]\npub struct Flow(pub Arc);\n\n/// A single line in the rendered spec, with hierarchical children\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpecLine {\n /// The formatted content of the line (e.g., \"Import: name=documents, source=LocalFile\")\n pub content: String,\n /// Child lines in the hierarchy\n pub children: Vec,\n}\n\n/// A rendered specification, grouped by sections\n#[pyclass(get_all, set_all)]\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct RenderedSpec {\n /// List of (section_name, lines) pairs\n pub sections: Vec<(String, Vec)>,\n}\n\n#[pyclass]\npub struct FlowLiveUpdaterUpdates(execution::FlowLiveUpdaterUpdates);\n\n#[pymethods]\nimpl FlowLiveUpdaterUpdates {\n #[getter]\n pub fn active_sources(&self) -> Vec {\n self.0.active_sources.clone()\n }\n\n #[getter]\n pub fn updated_sources(&self) -> Vec {\n self.0.updated_sources.clone()\n }\n}\n\n#[pyclass]\npub struct FlowLiveUpdater(pub Arc);\n\n#[pymethods]\nimpl FlowLiveUpdater {\n #[staticmethod]\n pub fn create<'py>(\n py: Python<'py>,\n flow: &Flow,\n options: Pythonized,\n ) -> PyResult> {\n let flow = flow.0.clone();\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow,\n lib_context.require_builtin_db_pool().into_py_result()?,\n options.into_inner(),\n )\n .await\n .into_py_result()?;\n Ok(Self(Arc::new(live_updater)))\n })\n }\n\n pub fn wait_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let live_updater = self.0.clone();\n future_into_py(\n py,\n async move { live_updater.wait().await.into_py_result() },\n )\n }\n\n pub fn next_status_updates_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let 
live_updater = self.0.clone();\n future_into_py(py, async move {\n let updates = live_updater.next_status_updates().await.into_py_result()?;\n Ok(FlowLiveUpdaterUpdates(updates))\n })\n }\n\n pub fn abort(&self) {\n self.0.abort();\n }\n\n pub fn index_update_info(&self) -> IndexUpdateInfo {\n IndexUpdateInfo(self.0.index_update_info())\n }\n}\n\n#[pymethods]\nimpl Flow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.flow.flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn name(&self) -> &str {\n &self.0.flow.flow_instance.name\n }\n\n pub fn evaluate_and_dump(\n &self,\n py: Python<'_>,\n options: Pythonized,\n ) -> PyResult<()> {\n py.allow_threads(|| {\n get_runtime()\n .block_on(async {\n let exec_plan = self.0.flow.get_execution_plan().await?;\n let lib_context = get_lib_context()?;\n let execution_ctx = self.0.use_execution_ctx().await?;\n execution::dumper::evaluate_and_dump(\n &exec_plan,\n &execution_ctx.setup_execution_context,\n &self.0.flow.data_schema,\n options.into_inner(),\n lib_context.require_builtin_db_pool()?,\n )\n .await\n })\n .into_py_result()?;\n Ok(())\n })\n }\n\n #[pyo3(signature = (output_mode=None))]\n pub fn get_spec(&self, output_mode: Option>) -> PyResult {\n let mode = output_mode.map_or(OutputMode::Concise, |m| m.into_inner());\n let spec = &self.0.flow.flow_instance;\n let mut sections: IndexMap> = IndexMap::new();\n\n // Sources\n sections.insert(\n \"Source\".to_string(),\n spec.import_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Import: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Processing\n fn walk(op: &NamedSpec, mode: OutputMode) -> RenderedSpecLine {\n let content = format!(\"{}: {}\", op.name, op.spec.format(mode));\n\n let children = match &op.spec {\n ReactiveOpSpec::ForEach(fe) => fe\n .op_scope\n .ops\n .iter()\n .map(|nested| walk(nested, mode))\n .collect(),\n _ => 
vec![],\n };\n\n RenderedSpecLine { content, children }\n }\n\n sections.insert(\n \"Processing\".to_string(),\n spec.reactive_ops.iter().map(|op| walk(op, mode)).collect(),\n );\n\n // Targets\n sections.insert(\n \"Targets\".to_string(),\n spec.export_ops\n .iter()\n .map(|op| RenderedSpecLine {\n content: format!(\"Export: name={}, {}\", op.name, op.spec.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n // Declarations\n sections.insert(\n \"Declarations\".to_string(),\n spec.declarations\n .iter()\n .map(|decl| RenderedSpecLine {\n content: format!(\"Declaration: {}\", decl.format(mode)),\n children: vec![],\n })\n .collect(),\n );\n\n Ok(RenderedSpec {\n sections: sections.into_iter().collect(),\n })\n }\n\n pub fn get_schema(&self) -> Vec<(String, String, String)> {\n let schema = &self.0.flow.data_schema;\n let mut result = Vec::new();\n\n fn process_fields(\n fields: &[FieldSchema],\n prefix: &str,\n result: &mut Vec<(String, String, String)>,\n ) {\n for field in fields {\n let field_name = format!(\"{}{}\", prefix, field.name);\n\n let mut field_type = match &field.value_type.typ {\n ValueType::Basic(basic) => format!(\"{basic}\"),\n ValueType::Table(t) => format!(\"{}\", t.kind),\n ValueType::Struct(_) => \"Struct\".to_string(),\n };\n\n if field.value_type.nullable {\n field_type.push('?');\n }\n\n let attr_str = if field.value_type.attrs.is_empty() {\n String::new()\n } else {\n field\n .value_type\n .attrs\n .keys()\n .map(|k| k.to_string())\n .collect::>()\n .join(\", \")\n };\n\n result.push((field_name.clone(), field_type, attr_str));\n\n match &field.value_type.typ {\n ValueType::Struct(s) => {\n process_fields(&s.fields, &format!(\"{field_name}.\"), result);\n }\n ValueType::Table(t) => {\n process_fields(&t.row.fields, &format!(\"{field_name}[].\"), result);\n }\n ValueType::Basic(_) => {}\n }\n }\n }\n\n process_fields(&schema.schema.fields, \"\", &mut result);\n result\n }\n\n pub fn make_setup_action(&self) -> SetupChangeBundle 
{\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n\n pub fn make_drop_action(&self) -> SetupChangeBundle {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names: vec![self.name().to_string()],\n };\n SetupChangeBundle(Arc::new(bundle))\n }\n}\n\n#[pyclass]\npub struct TransientFlow(pub Arc);\n\n#[pymethods]\nimpl TransientFlow {\n pub fn __str__(&self) -> String {\n serde_json::to_string_pretty(&self.0.transient_flow_instance).unwrap()\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn evaluate_async<'py>(\n &self,\n py: Python<'py>,\n args: Vec>,\n ) -> PyResult> {\n let flow = self.0.clone();\n let input_values: Vec = std::iter::zip(\n self.0.transient_flow_instance.input_fields.iter(),\n args.into_iter(),\n )\n .map(|(input_schema, arg)| value_from_py_object(&input_schema.value_type.typ, &arg))\n .collect::>()?;\n\n future_into_py(py, async move {\n let result = evaluate_transient_flow(&flow, &input_values)\n .await\n .into_py_result()?;\n Python::with_gil(|py| value_to_py_object(py, &result)?.into_py_any(py))\n })\n }\n}\n\n#[pyclass]\npub struct SetupChangeBundle(Arc);\n\n#[pymethods]\nimpl SetupChangeBundle {\n pub fn describe_async<'py>(&self, py: Python<'py>) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n future_into_py(py, async move {\n bundle.describe(&lib_context).await.into_py_result()\n })\n }\n\n pub fn apply_async<'py>(\n &self,\n py: Python<'py>,\n report_to_stdout: bool,\n ) -> PyResult> {\n let lib_context = get_lib_context().into_py_result()?;\n let bundle = self.0.clone();\n\n future_into_py(py, async move {\n let mut stdout = None;\n let mut sink = None;\n bundle\n .apply(\n &lib_context,\n if report_to_stdout {\n stdout.insert(std::io::stdout())\n } else {\n 
sink.insert(std::io::sink())\n },\n )\n .await\n .into_py_result()\n })\n }\n}\n\n#[pyfunction]\nfn flow_names_with_setup_async(py: Python<'_>) -> PyResult> {\n future_into_py(py, async move {\n let lib_context = get_lib_context().into_py_result()?;\n let setup_ctx = lib_context\n .require_persistence_ctx()\n .into_py_result()?\n .setup_ctx\n .read()\n .await;\n let flow_names: Vec = setup_ctx.all_setup_states.flows.keys().cloned().collect();\n PyResult::Ok(flow_names)\n })\n}\n\n#[pyfunction]\nfn make_setup_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Setup,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn make_drop_bundle(flow_names: Vec) -> PyResult {\n let bundle = setup::SetupChangeBundle {\n action: setup::FlowSetupChangeAction::Drop,\n flow_names,\n };\n Ok(SetupChangeBundle(Arc::new(bundle)))\n}\n\n#[pyfunction]\nfn remove_flow_context(flow_name: String) {\n let lib_context_locked = crate::lib_context::LIB_CONTEXT.read().unwrap();\n if let Some(lib_context) = lib_context_locked.as_ref() {\n lib_context.remove_flow_context(&flow_name)\n }\n}\n\n#[pyfunction]\nfn add_auth_entry(key: String, value: Pythonized) -> PyResult<()> {\n get_auth_registry()\n .add(key, value.into_inner())\n .into_py_result()?;\n Ok(())\n}\n\n#[pyfunction]\nfn seder_roundtrip<'py>(\n py: Python<'py>,\n value: Bound<'py, PyAny>,\n typ: Pythonized,\n) -> PyResult> {\n let typ = typ.into_inner();\n let value = value_from_py_object(&typ, &value)?;\n let value = value::test_util::seder_roundtrip(&value, &typ).into_py_result()?;\n value_to_py_object(py, &value)\n}\n\n/// A Python module implemented in Rust.\n#[pymodule]\n#[pyo3(name = \"_engine\")]\nfn cocoindex_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {\n m.add_function(wrap_pyfunction!(init, m)?)?;\n m.add_function(wrap_pyfunction!(start_server, m)?)?;\n m.add_function(wrap_pyfunction!(stop, m)?)?;\n 
m.add_function(wrap_pyfunction!(register_function_factory, m)?)?;\n m.add_function(wrap_pyfunction!(register_target_connector, m)?)?;\n m.add_function(wrap_pyfunction!(flow_names_with_setup_async, m)?)?;\n m.add_function(wrap_pyfunction!(make_setup_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(make_drop_bundle, m)?)?;\n m.add_function(wrap_pyfunction!(remove_flow_context, m)?)?;\n m.add_function(wrap_pyfunction!(add_auth_entry, m)?)?;\n\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n m.add_class::()?;\n\n let testutil_module = PyModule::new(m.py(), \"testutil\")?;\n testutil_module.add_function(wrap_pyfunction!(seder_roundtrip, &testutil_module)?)?;\n m.add_submodule(&testutil_module)?;\n\n Ok(())\n}\n"], ["/cocoindex/src/base/value.rs", "use super::schema::*;\nuse crate::base::duration::parse_duration;\nuse crate::prelude::invariance_violation;\nuse crate::{api_bail, api_error};\nuse anyhow::Result;\nuse base64::prelude::*;\nuse bytes::Bytes;\nuse chrono::Offset;\nuse log::warn;\nuse serde::{\n Deserialize, Serialize,\n de::{SeqAccess, Visitor},\n ser::{SerializeMap, SerializeSeq, SerializeTuple},\n};\nuse std::{collections::BTreeMap, ops::Deref, sync::Arc};\n\npub trait EstimatedByteSize: Sized {\n fn estimated_detached_byte_size(&self) -> usize;\n\n fn estimated_byte_size(&self) -> usize {\n self.estimated_detached_byte_size() + std::mem::size_of::()\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]\npub struct RangeValue {\n pub start: usize,\n pub end: usize,\n}\n\nimpl RangeValue {\n pub fn new(start: usize, end: usize) -> Self {\n RangeValue { start, end }\n }\n\n pub fn len(&self) -> usize {\n self.end - self.start\n }\n\n pub fn extract_str<'s>(&self, s: &'s (impl AsRef + ?Sized)) -> &'s str {\n let s = s.as_ref();\n &s[self.start..self.end]\n 
}\n}\n\nimpl Serialize for RangeValue {\n fn serialize(&self, serializer: S) -> Result {\n let mut tuple = serializer.serialize_tuple(2)?;\n tuple.serialize_element(&self.start)?;\n tuple.serialize_element(&self.end)?;\n tuple.end()\n }\n}\n\nimpl<'de> Deserialize<'de> for RangeValue {\n fn deserialize>(deserializer: D) -> Result {\n struct RangeVisitor;\n\n impl<'de> Visitor<'de> for RangeVisitor {\n type Value = RangeValue;\n\n fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {\n formatter.write_str(\"a tuple of two u64\")\n }\n\n fn visit_seq(self, mut seq: V) -> Result\n where\n V: SeqAccess<'de>,\n {\n let start = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing begin\"))?;\n let end = seq\n .next_element()?\n .ok_or_else(|| serde::de::Error::missing_field(\"missing end\"))?;\n Ok(RangeValue { start, end })\n }\n }\n deserializer.deserialize_tuple(2, RangeVisitor)\n }\n}\n\n/// Value of key.\n#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize)]\npub enum KeyValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Struct(Vec),\n}\n\nimpl From for KeyValue {\n fn from(value: Bytes) -> Self {\n KeyValue::Bytes(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Arc) -> Self {\n KeyValue::Str(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: String) -> Self {\n KeyValue::Str(Arc::from(value))\n }\n}\n\nimpl From for KeyValue {\n fn from(value: bool) -> Self {\n KeyValue::Bool(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: i64) -> Self {\n KeyValue::Int64(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: RangeValue) -> Self {\n KeyValue::Range(value)\n }\n}\n\nimpl From for KeyValue {\n fn from(value: uuid::Uuid) -> Self {\n KeyValue::Uuid(value)\n }\n}\n\nimpl From 
for KeyValue {\n fn from(value: chrono::NaiveDate) -> Self {\n KeyValue::Date(value)\n }\n}\n\nimpl From> for KeyValue {\n fn from(value: Vec) -> Self {\n KeyValue::Struct(value)\n }\n}\n\nimpl serde::Serialize for KeyValue {\n fn serialize(&self, serializer: S) -> Result {\n Value::from(self.clone()).serialize(serializer)\n }\n}\n\nimpl std::fmt::Display for KeyValue {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n KeyValue::Bytes(v) => write!(f, \"{}\", BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => write!(f, \"\\\"{}\\\"\", v.escape_default()),\n KeyValue::Bool(v) => write!(f, \"{v}\"),\n KeyValue::Int64(v) => write!(f, \"{v}\"),\n KeyValue::Range(v) => write!(f, \"[{}, {})\", v.start, v.end),\n KeyValue::Uuid(v) => write!(f, \"{v}\"),\n KeyValue::Date(v) => write!(f, \"{v}\"),\n KeyValue::Struct(v) => {\n write!(\n f,\n \"[{}]\",\n v.iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\", \")\n )\n }\n }\n }\n}\n\nimpl KeyValue {\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n let value = if fields_schema.len() == 1 {\n Value::from_json(value, &fields_schema[0].value_type.typ)?\n } else {\n let field_values: FieldValues = FieldValues::from_json(value, fields_schema)?;\n Value::Struct(field_values)\n };\n value.as_key()\n }\n\n pub fn from_values<'a>(values: impl ExactSizeIterator) -> Result {\n let key = if values.len() == 1 {\n let mut values = values;\n values.next().ok_or_else(invariance_violation)?.as_key()?\n } else {\n KeyValue::Struct(values.map(|v| v.as_key()).collect::>>()?)\n };\n Ok(key)\n }\n\n pub fn fields_iter(&self, num_fields: usize) -> Result> {\n let slice = if num_fields == 1 {\n std::slice::from_ref(self)\n } else {\n match self {\n KeyValue::Struct(v) => v,\n _ => api_bail!(\"Invalid key value type\"),\n }\n };\n Ok(slice.iter())\n }\n\n fn parts_from_str(\n values_iter: &mut impl Iterator,\n schema: &ValueType,\n ) -> Result {\n let result = match 
schema {\n ValueType::Basic(basic_type) => {\n let v = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n match basic_type {\n BasicValueType::Bytes => {\n KeyValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n BasicValueType::Str => KeyValue::Str(Arc::from(v)),\n BasicValueType::Bool => KeyValue::Bool(v.parse()?),\n BasicValueType::Int64 => KeyValue::Int64(v.parse()?),\n BasicValueType::Range => {\n let v2 = values_iter\n .next()\n .ok_or_else(|| api_error!(\"Key parts less than expected\"))?;\n KeyValue::Range(RangeValue {\n start: v.parse()?,\n end: v2.parse()?,\n })\n }\n BasicValueType::Uuid => KeyValue::Uuid(v.parse()?),\n BasicValueType::Date => KeyValue::Date(v.parse()?),\n schema => api_bail!(\"Invalid key type {schema}\"),\n }\n }\n ValueType::Struct(s) => KeyValue::Struct(\n s.fields\n .iter()\n .map(|f| KeyValue::parts_from_str(values_iter, &f.value_type.typ))\n .collect::>>()?,\n ),\n _ => api_bail!(\"Invalid key type {schema}\"),\n };\n Ok(result)\n }\n\n fn parts_to_strs(&self, output: &mut Vec) {\n match self {\n KeyValue::Bytes(v) => output.push(BASE64_STANDARD.encode(v)),\n KeyValue::Str(v) => output.push(v.to_string()),\n KeyValue::Bool(v) => output.push(v.to_string()),\n KeyValue::Int64(v) => output.push(v.to_string()),\n KeyValue::Range(v) => {\n output.push(v.start.to_string());\n output.push(v.end.to_string());\n }\n KeyValue::Uuid(v) => output.push(v.to_string()),\n KeyValue::Date(v) => output.push(v.to_string()),\n KeyValue::Struct(v) => {\n for part in v {\n part.parts_to_strs(output);\n }\n }\n }\n }\n\n pub fn from_strs(value: impl IntoIterator, schema: &ValueType) -> Result {\n let mut values_iter = value.into_iter();\n let result = Self::parts_from_str(&mut values_iter, schema)?;\n if values_iter.next().is_some() {\n api_bail!(\"Key parts more than expected\");\n }\n Ok(result)\n }\n\n pub fn to_strs(&self) -> Vec {\n let mut output = Vec::with_capacity(self.num_parts());\n 
self.parts_to_strs(&mut output);\n output\n }\n\n pub fn kind_str(&self) -> &'static str {\n match self {\n KeyValue::Bytes(_) => \"bytes\",\n KeyValue::Str(_) => \"str\",\n KeyValue::Bool(_) => \"bool\",\n KeyValue::Int64(_) => \"int64\",\n KeyValue::Range { .. } => \"range\",\n KeyValue::Uuid(_) => \"uuid\",\n KeyValue::Date(_) => \"date\",\n KeyValue::Struct(_) => \"struct\",\n }\n }\n\n pub fn bytes_value(&self) -> Result<&Bytes> {\n match self {\n KeyValue::Bytes(v) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn str_value(&self) -> Result<&Arc> {\n match self {\n KeyValue::Str(v) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn bool_value(&self) -> Result {\n match self {\n KeyValue::Bool(v) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn int64_value(&self) -> Result {\n match self {\n KeyValue::Int64(v) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn range_value(&self) -> Result {\n match self {\n KeyValue::Range(v) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn uuid_value(&self) -> Result {\n match self {\n KeyValue::Uuid(v) => Ok(*v),\n _ => anyhow::bail!(\"expected uuid value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn date_value(&self) -> Result {\n match self {\n KeyValue::Date(v) => Ok(*v),\n _ => anyhow::bail!(\"expected date value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn struct_value(&self) -> Result<&Vec> {\n match self {\n KeyValue::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind_str()),\n }\n }\n\n pub fn num_parts(&self) -> usize {\n match self {\n KeyValue::Range(_) => 2,\n KeyValue::Struct(v) => v.iter().map(|v| v.num_parts()).sum(),\n _ => 1,\n }\n }\n\n fn estimated_detached_byte_size(&self) -> 
usize {\n match self {\n KeyValue::Bytes(v) => v.len(),\n KeyValue::Str(v) => v.len(),\n KeyValue::Struct(v) => {\n v.iter()\n .map(KeyValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n KeyValue::Bool(_)\n | KeyValue::Int64(_)\n | KeyValue::Range(_)\n | KeyValue::Uuid(_)\n | KeyValue::Date(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub enum BasicValue {\n Bytes(Bytes),\n Str(Arc),\n Bool(bool),\n Int64(i64),\n Float32(f32),\n Float64(f64),\n Range(RangeValue),\n Uuid(uuid::Uuid),\n Date(chrono::NaiveDate),\n Time(chrono::NaiveTime),\n LocalDateTime(chrono::NaiveDateTime),\n OffsetDateTime(chrono::DateTime),\n TimeDelta(chrono::Duration),\n Json(Arc),\n Vector(Arc<[BasicValue]>),\n UnionVariant {\n tag_id: usize,\n value: Box,\n },\n}\n\nimpl From for BasicValue {\n fn from(value: Bytes) -> Self {\n BasicValue::Bytes(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Bytes(Bytes::from(value))\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: Arc) -> Self {\n BasicValue::Str(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: String) -> Self {\n BasicValue::Str(Arc::from(value))\n }\n}\n\nimpl From for BasicValue {\n fn from(value: bool) -> Self {\n BasicValue::Bool(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: i64) -> Self {\n BasicValue::Int64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f32) -> Self {\n BasicValue::Float32(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: f64) -> Self {\n BasicValue::Float64(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: uuid::Uuid) -> Self {\n BasicValue::Uuid(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveDate) -> Self {\n BasicValue::Date(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::NaiveTime) -> Self {\n BasicValue::Time(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: 
chrono::NaiveDateTime) -> Self {\n BasicValue::LocalDateTime(value)\n }\n}\n\nimpl From> for BasicValue {\n fn from(value: chrono::DateTime) -> Self {\n BasicValue::OffsetDateTime(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: chrono::Duration) -> Self {\n BasicValue::TimeDelta(value)\n }\n}\n\nimpl From for BasicValue {\n fn from(value: serde_json::Value) -> Self {\n BasicValue::Json(Arc::from(value))\n }\n}\n\nimpl> From> for BasicValue {\n fn from(value: Vec) -> Self {\n BasicValue::Vector(Arc::from(\n value.into_iter().map(|v| v.into()).collect::>(),\n ))\n }\n}\n\nimpl BasicValue {\n pub fn into_key(self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v),\n BasicValue::Str(v) => KeyValue::Str(v),\n BasicValue::Bool(v) => KeyValue::Bool(v),\n BasicValue::Int64(v) => KeyValue::Int64(v),\n BasicValue::Range(v) => KeyValue::Range(v),\n BasicValue::Uuid(v) => KeyValue::Uuid(v),\n BasicValue::Date(v) => KeyValue::Date(v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. } => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n BasicValue::Bytes(v) => KeyValue::Bytes(v.clone()),\n BasicValue::Str(v) => KeyValue::Str(v.clone()),\n BasicValue::Bool(v) => KeyValue::Bool(*v),\n BasicValue::Int64(v) => KeyValue::Int64(*v),\n BasicValue::Range(v) => KeyValue::Range(*v),\n BasicValue::Uuid(v) => KeyValue::Uuid(*v),\n BasicValue::Date(v) => KeyValue::Date(*v),\n BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_)\n | BasicValue::Json(_)\n | BasicValue::Vector(_)\n | BasicValue::UnionVariant { .. 
} => api_bail!(\"invalid key value type\"),\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match &self {\n BasicValue::Bytes(_) => \"bytes\",\n BasicValue::Str(_) => \"str\",\n BasicValue::Bool(_) => \"bool\",\n BasicValue::Int64(_) => \"int64\",\n BasicValue::Float32(_) => \"float32\",\n BasicValue::Float64(_) => \"float64\",\n BasicValue::Range(_) => \"range\",\n BasicValue::Uuid(_) => \"uuid\",\n BasicValue::Date(_) => \"date\",\n BasicValue::Time(_) => \"time\",\n BasicValue::LocalDateTime(_) => \"local_datetime\",\n BasicValue::OffsetDateTime(_) => \"offset_datetime\",\n BasicValue::TimeDelta(_) => \"timedelta\",\n BasicValue::Json(_) => \"json\",\n BasicValue::Vector(_) => \"vector\",\n BasicValue::UnionVariant { .. } => \"union\",\n }\n }\n\n /// Returns the estimated byte size of the value, for detached data (i.e. allocated on heap).\n fn estimated_detached_byte_size(&self) -> usize {\n fn json_estimated_detached_byte_size(val: &serde_json::Value) -> usize {\n match val {\n serde_json::Value::String(s) => s.len(),\n serde_json::Value::Array(arr) => {\n arr.iter()\n .map(json_estimated_detached_byte_size)\n .sum::()\n + arr.len() * std::mem::size_of::()\n }\n serde_json::Value::Object(map) => map\n .iter()\n .map(|(k, v)| {\n std::mem::size_of::()\n + k.len()\n + json_estimated_detached_byte_size(v)\n })\n .sum(),\n serde_json::Value::Null\n | serde_json::Value::Bool(_)\n | serde_json::Value::Number(_) => 0,\n }\n }\n match self {\n BasicValue::Bytes(v) => v.len(),\n BasicValue::Str(v) => v.len(),\n BasicValue::Json(v) => json_estimated_detached_byte_size(v),\n BasicValue::Vector(v) => {\n v.iter()\n .map(BasicValue::estimated_detached_byte_size)\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n BasicValue::UnionVariant { value, .. 
} => {\n value.estimated_detached_byte_size() + std::mem::size_of::()\n }\n BasicValue::Bool(_)\n | BasicValue::Int64(_)\n | BasicValue::Float32(_)\n | BasicValue::Float64(_)\n | BasicValue::Range(_)\n | BasicValue::Uuid(_)\n | BasicValue::Date(_)\n | BasicValue::Time(_)\n | BasicValue::LocalDateTime(_)\n | BasicValue::OffsetDateTime(_)\n | BasicValue::TimeDelta(_) => 0,\n }\n }\n}\n\n#[derive(Debug, Clone, Default, PartialEq, Deserialize)]\npub enum Value {\n #[default]\n Null,\n Basic(BasicValue),\n Struct(FieldValues),\n UTable(Vec),\n KTable(BTreeMap),\n LTable(Vec),\n}\n\nimpl> From for Value {\n fn from(value: T) -> Self {\n Value::Basic(value.into())\n }\n}\n\nimpl From for Value {\n fn from(value: KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v)),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v)),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.into_iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From<&KeyValue> for Value {\n fn from(value: &KeyValue) -> Self {\n match value {\n KeyValue::Bytes(v) => Value::Basic(BasicValue::Bytes(v.clone())),\n KeyValue::Str(v) => Value::Basic(BasicValue::Str(v.clone())),\n KeyValue::Bool(v) => Value::Basic(BasicValue::Bool(*v)),\n KeyValue::Int64(v) => Value::Basic(BasicValue::Int64(*v)),\n KeyValue::Range(v) => Value::Basic(BasicValue::Range(*v)),\n KeyValue::Uuid(v) => Value::Basic(BasicValue::Uuid(*v)),\n KeyValue::Date(v) => Value::Basic(BasicValue::Date(*v)),\n KeyValue::Struct(v) => Value::Struct(FieldValues {\n fields: v.iter().map(Value::from).collect(),\n }),\n }\n }\n}\n\nimpl From for Value {\n fn from(value: FieldValues) -> Self {\n 
Value::Struct(value)\n }\n}\n\nimpl> From> for Value {\n fn from(value: Option) -> Self {\n match value {\n Some(v) => v.into(),\n None => Value::Null,\n }\n }\n}\n\nimpl Value {\n pub fn from_alternative(value: Value) -> Self\n where\n AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .into_iter()\n .map(|v| Value::::from_alternative(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.into_iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.into_iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.into_iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn from_alternative_ref(value: &Value) -> Self\n where\n for<'a> &'a AltVS: Into,\n {\n match value {\n Value::Null => Value::Null,\n Value::Basic(v) => Value::Basic(v.clone()),\n Value::Struct(v) => Value::Struct(FieldValues:: {\n fields: v\n .fields\n .iter()\n .map(|v| Value::::from_alternative_ref(v))\n .collect(),\n }),\n Value::UTable(v) => Value::UTable(v.iter().map(|v| v.into()).collect()),\n Value::KTable(v) => {\n Value::KTable(v.iter().map(|(k, v)| (k.clone(), v.into())).collect())\n }\n Value::LTable(v) => Value::LTable(v.iter().map(|v| v.into()).collect()),\n }\n }\n\n pub fn is_null(&self) -> bool {\n matches!(self, Value::Null)\n }\n\n pub fn into_key(self) -> Result {\n let result = match self {\n Value::Basic(v) => v.into_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .into_iter()\n .map(|v| v.into_key())\n .collect::>>()?,\n ),\n Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn as_key(&self) -> Result {\n let result = match self {\n Value::Basic(v) => v.as_key()?,\n Value::Struct(v) => KeyValue::Struct(\n v.fields\n .iter()\n .map(|v| v.as_key())\n .collect::>>()?,\n ),\n 
Value::Null | Value::UTable(_) | Value::KTable(_) | Value::LTable(_) => {\n anyhow::bail!(\"invalid key value type\")\n }\n };\n Ok(result)\n }\n\n pub fn kind(&self) -> &'static str {\n match self {\n Value::Null => \"null\",\n Value::Basic(v) => v.kind(),\n Value::Struct(_) => \"Struct\",\n Value::UTable(_) => \"UTable\",\n Value::KTable(_) => \"KTable\",\n Value::LTable(_) => \"LTable\",\n }\n }\n\n pub fn optional(&self) -> Option<&Self> {\n match self {\n Value::Null => None,\n _ => Some(self),\n }\n }\n\n pub fn as_bytes(&self) -> Result<&Bytes> {\n match self {\n Value::Basic(BasicValue::Bytes(v)) => Ok(v),\n _ => anyhow::bail!(\"expected bytes value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_str(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Str(v)) => Ok(v),\n _ => anyhow::bail!(\"expected str value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_bool(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Bool(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected bool value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_int64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Int64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected int64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float32(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float32(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float32 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_float64(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Float64(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected float64 value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_range(&self) -> Result {\n match self {\n Value::Basic(BasicValue::Range(v)) => Ok(*v),\n _ => anyhow::bail!(\"expected range value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_json(&self) -> Result<&Arc> {\n match self {\n Value::Basic(BasicValue::Json(v)) => Ok(v),\n _ => anyhow::bail!(\"expected json value, but got {}\", self.kind()),\n }\n }\n\n 
pub fn as_vector(&self) -> Result<&Arc<[BasicValue]>> {\n match self {\n Value::Basic(BasicValue::Vector(v)) => Ok(v),\n _ => anyhow::bail!(\"expected vector value, but got {}\", self.kind()),\n }\n }\n\n pub fn as_struct(&self) -> Result<&FieldValues> {\n match self {\n Value::Struct(v) => Ok(v),\n _ => anyhow::bail!(\"expected struct value, but got {}\", self.kind()),\n }\n }\n}\n\nimpl Value {\n pub fn estimated_byte_size(&self) -> usize {\n std::mem::size_of::()\n + match self {\n Value::Null => 0,\n Value::Basic(v) => v.estimated_detached_byte_size(),\n Value::Struct(v) => v.estimated_detached_byte_size(),\n Value::UTable(v) | Value::LTable(v) => {\n v.iter()\n .map(|v| v.estimated_detached_byte_size())\n .sum::()\n + v.len() * std::mem::size_of::()\n }\n Value::KTable(v) => {\n v.iter()\n .map(|(k, v)| {\n k.estimated_detached_byte_size() + v.estimated_detached_byte_size()\n })\n .sum::()\n + v.len() * std::mem::size_of::<(String, ScopeValue)>()\n }\n }\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Deserialize)]\npub struct FieldValues {\n pub fields: Vec>,\n}\n\nimpl EstimatedByteSize for FieldValues {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(Value::::estimated_byte_size)\n .sum::()\n + self.fields.len() * std::mem::size_of::>()\n }\n}\n\nimpl serde::Serialize for FieldValues {\n fn serialize(&self, serializer: S) -> Result {\n self.fields.serialize(serializer)\n }\n}\n\nimpl FieldValues\nwhere\n FieldValues: Into,\n{\n pub fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize(num_fields, Value::::Null);\n Self { fields }\n }\n\n fn from_json_values<'a>(\n fields: impl Iterator,\n ) -> Result {\n Ok(Self {\n fields: fields\n .map(|(s, v)| {\n let value = Value::::from_json(v, &s.value_type.typ)?;\n if value.is_null() && !s.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", s.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n fn 
from_json_object<'a>(\n values: serde_json::Map,\n fields_schema: impl Iterator,\n ) -> Result {\n let mut values = values;\n Ok(Self {\n fields: fields_schema\n .map(|field| {\n let value = match values.get_mut(&field.name) {\n Some(v) => {\n Value::::from_json(std::mem::take(v), &field.value_type.typ)?\n }\n None => Value::::default(),\n };\n if value.is_null() && !field.value_type.nullable {\n api_bail!(\"expected non-null value for `{}`\", field.name);\n }\n Ok(value)\n })\n .collect::>>()?,\n })\n }\n\n pub fn from_json(value: serde_json::Value, fields_schema: &[FieldSchema]) -> Result {\n match value {\n serde_json::Value::Array(v) => {\n if v.len() != fields_schema.len() {\n api_bail!(\"unmatched value length\");\n }\n Self::from_json_values(fields_schema.iter().zip(v))\n }\n serde_json::Value::Object(v) => Self::from_json_object(v, fields_schema.iter()),\n _ => api_bail!(\"invalid value type\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct ScopeValue(pub FieldValues);\n\nimpl EstimatedByteSize for ScopeValue {\n fn estimated_detached_byte_size(&self) -> usize {\n self.0.estimated_detached_byte_size()\n }\n}\n\nimpl Deref for ScopeValue {\n type Target = FieldValues;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl From for ScopeValue {\n fn from(value: FieldValues) -> Self {\n Self(value)\n }\n}\n\nimpl serde::Serialize for BasicValue {\n fn serialize(&self, serializer: S) -> Result {\n match self {\n BasicValue::Bytes(v) => serializer.serialize_str(&BASE64_STANDARD.encode(v)),\n BasicValue::Str(v) => serializer.serialize_str(v),\n BasicValue::Bool(v) => serializer.serialize_bool(*v),\n BasicValue::Int64(v) => serializer.serialize_i64(*v),\n BasicValue::Float32(v) => serializer.serialize_f32(*v),\n BasicValue::Float64(v) => serializer.serialize_f64(*v),\n BasicValue::Range(v) => v.serialize(serializer),\n BasicValue::Uuid(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Date(v) => 
serializer.serialize_str(&v.to_string()),\n BasicValue::Time(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::LocalDateTime(v) => {\n serializer.serialize_str(&v.format(\"%Y-%m-%dT%H:%M:%S%.6f\").to_string())\n }\n BasicValue::OffsetDateTime(v) => {\n serializer.serialize_str(&v.to_rfc3339_opts(chrono::SecondsFormat::AutoSi, true))\n }\n BasicValue::TimeDelta(v) => serializer.serialize_str(&v.to_string()),\n BasicValue::Json(v) => v.serialize(serializer),\n BasicValue::Vector(v) => v.serialize(serializer),\n BasicValue::UnionVariant { tag_id, value } => {\n let mut s = serializer.serialize_tuple(2)?;\n s.serialize_element(tag_id)?;\n s.serialize_element(value)?;\n s.end()\n }\n }\n }\n}\n\nimpl BasicValue {\n pub fn from_json(value: serde_json::Value, schema: &BasicValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::String(v), BasicValueType::Bytes) => {\n BasicValue::Bytes(Bytes::from(BASE64_STANDARD.decode(v)?))\n }\n (serde_json::Value::String(v), BasicValueType::Str) => BasicValue::Str(Arc::from(v)),\n (serde_json::Value::Bool(v), BasicValueType::Bool) => BasicValue::Bool(v),\n (serde_json::Value::Number(v), BasicValueType::Int64) => BasicValue::Int64(\n v.as_i64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid int64 value {v}\"))?,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float32) => BasicValue::Float32(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp32 value {v}\"))?\n as f32,\n ),\n (serde_json::Value::Number(v), BasicValueType::Float64) => BasicValue::Float64(\n v.as_f64()\n .ok_or_else(|| anyhow::anyhow!(\"invalid fp64 value {v}\"))?,\n ),\n (v, BasicValueType::Range) => BasicValue::Range(serde_json::from_value(v)?),\n (serde_json::Value::String(v), BasicValueType::Uuid) => BasicValue::Uuid(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Date) => BasicValue::Date(v.parse()?),\n (serde_json::Value::String(v), BasicValueType::Time) => BasicValue::Time(v.parse()?),\n 
(serde_json::Value::String(v), BasicValueType::LocalDateTime) => {\n BasicValue::LocalDateTime(v.parse()?)\n }\n (serde_json::Value::String(v), BasicValueType::OffsetDateTime) => {\n match chrono::DateTime::parse_from_rfc3339(&v) {\n Ok(dt) => BasicValue::OffsetDateTime(dt),\n Err(e) => {\n if let Ok(dt) = v.parse::() {\n warn!(\"Datetime without timezone offset, assuming UTC\");\n BasicValue::OffsetDateTime(chrono::DateTime::from_naive_utc_and_offset(\n dt,\n chrono::Utc.fix(),\n ))\n } else {\n Err(e)?\n }\n }\n }\n }\n (serde_json::Value::String(v), BasicValueType::TimeDelta) => {\n BasicValue::TimeDelta(parse_duration(&v)?)\n }\n (v, BasicValueType::Json) => BasicValue::Json(Arc::from(v)),\n (\n serde_json::Value::Array(v),\n BasicValueType::Vector(VectorTypeSchema { element_type, .. }),\n ) => {\n let vec = v\n .into_iter()\n .map(|v| BasicValue::from_json(v, element_type))\n .collect::>>()?;\n BasicValue::Vector(Arc::from(vec))\n }\n (v, BasicValueType::Union(typ)) => {\n let arr = match v {\n serde_json::Value::Array(arr) => arr,\n _ => anyhow::bail!(\"Invalid JSON value for union, expect array\"),\n };\n\n if arr.len() != 2 {\n anyhow::bail!(\n \"Invalid union tuple: expect 2 values, received {}\",\n arr.len()\n );\n }\n\n let mut obj_iter = arr.into_iter();\n\n // Take first element\n let tag_id = obj_iter\n .next()\n .and_then(|value| value.as_u64().map(|num_u64| num_u64 as usize))\n .unwrap();\n\n // Take second element\n let value = obj_iter.next().unwrap();\n\n let cur_type = typ\n .types\n .get(tag_id)\n .ok_or_else(|| anyhow::anyhow!(\"No type in `tag_id` \\\"{tag_id}\\\" found\"))?;\n\n BasicValue::UnionVariant {\n tag_id,\n value: Box::new(BasicValue::from_json(value, cur_type)?),\n }\n }\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\nstruct TableEntry<'a>(&'a KeyValue, &'a ScopeValue);\n\nimpl serde::Serialize for Value {\n fn serialize(&self, serializer: S) 
-> Result {\n match self {\n Value::Null => serializer.serialize_none(),\n Value::Basic(v) => v.serialize(serializer),\n Value::Struct(v) => v.serialize(serializer),\n Value::UTable(v) => v.serialize(serializer),\n Value::KTable(m) => {\n let mut seq = serializer.serialize_seq(Some(m.len()))?;\n for (k, v) in m.iter() {\n seq.serialize_element(&TableEntry(k, v))?;\n }\n seq.end()\n }\n Value::LTable(v) => v.serialize(serializer),\n }\n }\n}\n\nimpl serde::Serialize for TableEntry<'_> {\n fn serialize(&self, serializer: S) -> Result {\n let &TableEntry(key, value) = self;\n let mut seq = serializer.serialize_seq(Some(value.0.fields.len() + 1))?;\n seq.serialize_element(key)?;\n for item in value.0.fields.iter() {\n seq.serialize_element(item)?;\n }\n seq.end()\n }\n}\n\nimpl Value\nwhere\n FieldValues: Into,\n{\n pub fn from_json(value: serde_json::Value, schema: &ValueType) -> Result {\n let result = match (value, schema) {\n (serde_json::Value::Null, _) => Value::::Null,\n (v, ValueType::Basic(t)) => Value::::Basic(BasicValue::from_json(v, t)?),\n (v, ValueType::Struct(s)) => {\n Value::::Struct(FieldValues::::from_json(v, &s.fields)?)\n }\n (serde_json::Value::Array(v), ValueType::Table(s)) => match s.kind {\n TableKind::UTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n TableKind::KTable => {\n let rows = v\n .into_iter()\n .map(|v| {\n let mut fields_iter = s.row.fields.iter();\n let key_field = fields_iter\n .next()\n .ok_or_else(|| api_error!(\"Empty struct field values\"))?;\n\n match v {\n serde_json::Value::Array(v) => {\n let mut field_vals_iter = v.into_iter();\n let key = Self::from_json(\n field_vals_iter.next().ok_or_else(|| {\n api_error!(\"Empty struct field values\")\n })?,\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_values(\n fields_iter.zip(field_vals_iter),\n )?;\n Ok((key, values.into()))\n }\n 
serde_json::Value::Object(mut v) => {\n let key = Self::from_json(\n std::mem::take(v.get_mut(&key_field.name).ok_or_else(\n || {\n api_error!(\n \"key field `{}` doesn't exist in value\",\n key_field.name\n )\n },\n )?),\n &key_field.value_type.typ,\n )?\n .into_key()?;\n let values = FieldValues::from_json_object(v, fields_iter)?;\n Ok((key, values.into()))\n }\n _ => api_bail!(\"Table value must be a JSON array or object\"),\n }\n })\n .collect::>>()?;\n Value::KTable(rows)\n }\n TableKind::LTable => {\n let rows = v\n .into_iter()\n .map(|v| Ok(FieldValues::from_json(v, &s.row.fields)?.into()))\n .collect::>>()?;\n Value::LTable(rows)\n }\n },\n (v, t) => {\n anyhow::bail!(\"Value and type not matched.\\nTarget type {t:?}\\nJSON value: {v}\\n\")\n }\n };\n Ok(result)\n }\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct TypedValue<'a> {\n pub t: &'a ValueType,\n pub v: &'a Value,\n}\n\nimpl Serialize for TypedValue<'_> {\n fn serialize(&self, serializer: S) -> Result {\n match (self.t, self.v) {\n (_, Value::Null) => serializer.serialize_none(),\n (ValueType::Basic(t), v) => match t {\n BasicValueType::Union(_) => match v {\n Value::Basic(BasicValue::UnionVariant { value, .. 
}) => {\n value.serialize(serializer)\n }\n _ => Err(serde::ser::Error::custom(\n \"Unmatched union type and value for `TypedValue`\",\n )),\n },\n _ => v.serialize(serializer),\n },\n (ValueType::Struct(s), Value::Struct(field_values)) => TypedFieldsValue {\n schema: &s.fields,\n values_iter: field_values.fields.iter(),\n }\n .serialize(serializer),\n (ValueType::Table(c), Value::UTable(rows) | Value::LTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for row in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: row.fields.iter(),\n })?;\n }\n seq.end()\n }\n (ValueType::Table(c), Value::KTable(rows)) => {\n let mut seq = serializer.serialize_seq(Some(rows.len()))?;\n for (k, v) in rows {\n seq.serialize_element(&TypedFieldsValue {\n schema: &c.row.fields,\n values_iter: std::iter::once(&Value::from(k.clone()))\n .chain(v.fields.iter()),\n })?;\n }\n seq.end()\n }\n _ => Err(serde::ser::Error::custom(format!(\n \"Incompatible value type: {:?} {:?}\",\n self.t, self.v\n ))),\n }\n }\n}\n\npub struct TypedFieldsValue<'a, I: Iterator + Clone> {\n pub schema: &'a [FieldSchema],\n pub values_iter: I,\n}\n\nimpl<'a, I: Iterator + Clone> Serialize for TypedFieldsValue<'a, I> {\n fn serialize(&self, serializer: S) -> Result {\n let mut map = serializer.serialize_map(Some(self.schema.len()))?;\n let values_iter = self.values_iter.clone();\n for (field, value) in self.schema.iter().zip(values_iter) {\n map.serialize_entry(\n &field.name,\n &TypedValue {\n t: &field.value_type.typ,\n v: value,\n },\n )?;\n }\n map.end()\n }\n}\n\npub mod test_util {\n use super::*;\n\n pub fn seder_roundtrip(value: &Value, typ: &ValueType) -> Result {\n let json_value = serde_json::to_value(value)?;\n let roundtrip_value = Value::from_json(json_value, typ)?;\n Ok(roundtrip_value)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use std::collections::BTreeMap;\n\n #[test]\n fn test_estimated_byte_size_null() {\n let 
value = Value::::Null;\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_primitive() {\n // Test primitives that should have 0 detached byte size\n let value = Value::::Basic(BasicValue::Bool(true));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Int64(42));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n let value = Value::::Basic(BasicValue::Float64(3.14));\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_string() {\n let test_str = \"hello world\";\n let value = Value::::Basic(BasicValue::Str(Arc::from(test_str)));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_str.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_bytes() {\n let test_bytes = b\"hello world\";\n let value = Value::::Basic(BasicValue::Bytes(Bytes::from(test_bytes.to_vec())));\n let size = value.estimated_byte_size();\n\n let expected_size = std::mem::size_of::>() + test_bytes.len();\n assert_eq!(size, expected_size);\n }\n\n #[test]\n fn test_estimated_byte_size_basic_json() {\n let json_val = serde_json::json!({\"key\": \"value\", \"number\": 42});\n let value = Value::::Basic(BasicValue::Json(Arc::from(json_val)));\n let size = value.estimated_byte_size();\n\n // Should include the size of the JSON structure\n // The exact size depends on the internal JSON representation\n assert!(size > std::mem::size_of::>());\n }\n\n #[test]\n fn test_estimated_byte_size_basic_vector() {\n let vec_elements = vec![\n BasicValue::Str(Arc::from(\"hello\")),\n BasicValue::Str(Arc::from(\"world\")),\n BasicValue::Int64(42),\n ];\n let value = Value::::Basic(BasicValue::Vector(Arc::from(vec_elements)));\n let size = 
value.estimated_byte_size();\n\n // Should include the size of the vector elements\n let expected_min_size = std::mem::size_of::>()\n + \"hello\".len()\n + \"world\".len()\n + 3 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_struct() {\n let fields = vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"test\"))),\n Value::::Basic(BasicValue::Int64(123)),\n ];\n let field_values = FieldValues { fields };\n let value = Value::::Struct(field_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"test\".len()\n + 2 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_utable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"item2\",\n )))],\n }),\n ];\n let value = Value::::UTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"item1\".len()\n + \"item2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ltable() {\n let scope_values = vec![\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list1\",\n )))],\n }),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"list2\",\n )))],\n }),\n ];\n let value = Value::::LTable(scope_values);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"list1\".len()\n + \"list2\".len()\n + 2 * std::mem::size_of::();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_ktable() {\n let mut map = BTreeMap::new();\n map.insert(\n KeyValue::Str(Arc::from(\"key1\")),\n ScopeValue(FieldValues {\n fields: 
vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value1\",\n )))],\n }),\n );\n map.insert(\n KeyValue::Str(Arc::from(\"key2\")),\n ScopeValue(FieldValues {\n fields: vec![Value::::Basic(BasicValue::Str(Arc::from(\n \"value2\",\n )))],\n }),\n );\n let value = Value::::KTable(map);\n let size = value.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"key1\".len()\n + \"key2\".len()\n + \"value1\".len()\n + \"value2\".len()\n + 2 * std::mem::size_of::<(String, ScopeValue)>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_nested_struct() {\n let inner_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"inner\"))),\n Value::::Basic(BasicValue::Int64(456)),\n ],\n });\n\n let outer_struct = Value::::Struct(FieldValues {\n fields: vec![\n Value::::Basic(BasicValue::Str(Arc::from(\"outer\"))),\n inner_struct,\n ],\n });\n\n let size = outer_struct.estimated_byte_size();\n\n let expected_min_size = std::mem::size_of::>()\n + \"outer\".len()\n + \"inner\".len()\n + 4 * std::mem::size_of::>();\n assert!(size >= expected_min_size);\n }\n\n #[test]\n fn test_estimated_byte_size_empty_collections() {\n // Empty UTable\n let value = Value::::UTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty LTable\n let value = Value::::LTable(vec![]);\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty KTable\n let value = Value::::KTable(BTreeMap::new());\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n\n // Empty Struct\n let value = Value::::Struct(FieldValues { fields: vec![] });\n let size = value.estimated_byte_size();\n assert_eq!(size, std::mem::size_of::>());\n }\n}\n"], ["/cocoindex/src/ops/targets/shared/property_graph.rs", "use crate::prelude::*;\n\nuse crate::ops::sdk::{AuthEntryReference, 
FieldSchema};\n\n#[derive(Debug, Deserialize)]\npub struct TargetFieldMapping {\n pub source: spec::FieldName,\n\n /// Field name for the node in the Knowledge Graph.\n /// If unspecified, it's the same as `field_name`.\n #[serde(default)]\n pub target: Option,\n}\n\nimpl TargetFieldMapping {\n pub fn get_target(&self) -> &spec::FieldName {\n self.target.as_ref().unwrap_or(&self.source)\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodeFromFieldsSpec {\n pub label: String,\n pub fields: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct NodesSpec {\n pub label: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct RelationshipsSpec {\n pub rel_type: String,\n pub source: NodeFromFieldsSpec,\n pub target: NodeFromFieldsSpec,\n}\n\n#[derive(Debug, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum GraphElementMapping {\n Relationship(RelationshipsSpec),\n Node(NodesSpec),\n}\n\n#[derive(Debug, Deserialize)]\npub struct GraphDeclaration {\n pub nodes_label: String,\n\n #[serde(flatten)]\n pub index_options: spec::IndexOptions,\n}\n\n#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Clone)]\npub enum ElementType {\n Node(String),\n Relationship(String),\n}\n\nimpl ElementType {\n pub fn label(&self) -> &str {\n match self {\n ElementType::Node(label) => label,\n ElementType::Relationship(label) => label,\n }\n }\n\n pub fn from_mapping_spec(spec: &GraphElementMapping) -> Self {\n match spec {\n GraphElementMapping::Relationship(spec) => {\n ElementType::Relationship(spec.rel_type.clone())\n }\n GraphElementMapping::Node(spec) => ElementType::Node(spec.label.clone()),\n }\n }\n\n pub fn matcher(&self, var_name: &str) -> String {\n match self {\n ElementType::Relationship(label) => format!(\"()-[{var_name}:{label}]->()\"),\n ElementType::Node(label) => format!(\"({var_name}:{label})\"),\n }\n }\n}\n\nimpl std::fmt::Display for ElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ElementType::Node(label) 
=> write!(f, \"Node(label:{label})\"),\n ElementType::Relationship(rel_type) => write!(f, \"Relationship(type:{rel_type})\"),\n }\n }\n}\n\n#[derive(Debug, Serialize, Deserialize, Derivative)]\n#[derivative(\n Clone(bound = \"\"),\n PartialEq(bound = \"\"),\n Eq(bound = \"\"),\n Hash(bound = \"\")\n)]\npub struct GraphElementType {\n #[serde(bound = \"\")]\n pub connection: AuthEntryReference,\n pub typ: ElementType,\n}\n\nimpl std::fmt::Display for GraphElementType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}/{}\", self.connection.key, self.typ)\n }\n}\n\npub struct GraphElementSchema {\n pub elem_type: ElementType,\n pub key_fields: Vec,\n pub value_fields: Vec,\n}\n\npub struct GraphElementInputFieldsIdx {\n pub key: Vec,\n pub value: Vec,\n}\n\nimpl GraphElementInputFieldsIdx {\n pub fn extract_key(&self, fields: &[value::Value]) -> Result {\n value::KeyValue::from_values(self.key.iter().map(|idx| &fields[*idx]))\n }\n}\n\npub struct AnalyzedGraphElementFieldMapping {\n pub schema: Arc,\n pub fields_input_idx: GraphElementInputFieldsIdx,\n}\n\nimpl AnalyzedGraphElementFieldMapping {\n pub fn has_value_fields(&self) -> bool {\n !self.fields_input_idx.value.is_empty()\n }\n}\n\npub struct AnalyzedRelationshipInfo {\n pub source: AnalyzedGraphElementFieldMapping,\n pub target: AnalyzedGraphElementFieldMapping,\n}\n\npub struct AnalyzedDataCollection {\n pub schema: Arc,\n pub value_fields_input_idx: Vec,\n\n pub rel: Option,\n}\n\nimpl AnalyzedDataCollection {\n pub fn dependent_node_labels(&self) -> IndexSet<&str> {\n let mut dependent_node_labels = IndexSet::new();\n if let Some(rel) = &self.rel {\n dependent_node_labels.insert(rel.source.schema.elem_type.label());\n dependent_node_labels.insert(rel.target.schema.elem_type.label());\n }\n dependent_node_labels\n }\n}\n\nstruct GraphElementSchemaBuilder {\n elem_type: ElementType,\n key_fields: Vec,\n value_fields: Vec,\n}\n\nimpl GraphElementSchemaBuilder {\n fn 
new(elem_type: ElementType) -> Self {\n Self {\n elem_type,\n key_fields: vec![],\n value_fields: vec![],\n }\n }\n\n fn merge_fields(\n elem_type: &ElementType,\n kind: &str,\n existing_fields: &mut Vec,\n fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result> {\n if fields.is_empty() {\n return Ok(vec![]);\n }\n let result: Vec = if existing_fields.is_empty() {\n let fields_idx: Vec = fields.iter().map(|(idx, _)| *idx).collect();\n existing_fields.extend(fields.into_iter().map(|(_, f)| f));\n fields_idx\n } else {\n if existing_fields.len() != fields.len() {\n bail!(\n \"{elem_type} {kind} fields number mismatch: {} vs {}\",\n existing_fields.len(),\n fields.len()\n );\n }\n let mut fields_map: HashMap<_, _> = fields\n .into_iter()\n .map(|(idx, schema)| (schema.name, (idx, schema.value_type)))\n .collect();\n // Follow the order of existing fields\n existing_fields\n .iter()\n .map(|existing_field| {\n let (idx, typ) = fields_map.remove(&existing_field.name).ok_or_else(|| {\n anyhow!(\n \"{elem_type} {kind} field `{}` not found in some collector\",\n existing_field.name\n )\n })?;\n if typ != existing_field.value_type {\n bail!(\n \"{elem_type} {kind} field `{}` type mismatch: {} vs {}\",\n existing_field.name,\n typ,\n existing_field.value_type\n )\n }\n Ok(idx)\n })\n .collect::>>()?\n };\n Ok(result)\n }\n\n fn merge(\n &mut self,\n key_fields: Vec<(usize, schema::FieldSchema)>,\n value_fields: Vec<(usize, schema::FieldSchema)>,\n ) -> Result {\n let key_fields_idx =\n Self::merge_fields(&self.elem_type, \"key\", &mut self.key_fields, key_fields)?;\n let value_fields_idx = Self::merge_fields(\n &self.elem_type,\n \"value\",\n &mut self.value_fields,\n value_fields,\n )?;\n Ok(GraphElementInputFieldsIdx {\n key: key_fields_idx,\n value: value_fields_idx,\n })\n }\n\n fn build_schema(self) -> Result {\n if self.key_fields.is_empty() {\n bail!(\n \"No key fields specified for Node label `{}`\",\n self.elem_type\n );\n }\n Ok(GraphElementSchema {\n elem_type: 
self.elem_type,\n key_fields: self.key_fields,\n value_fields: self.value_fields,\n })\n }\n}\nstruct DependentNodeLabelAnalyzer<'a, AuthEntry> {\n graph_elem_type: GraphElementType,\n fields: IndexMap,\n remaining_fields: HashMap<&'a str, &'a TargetFieldMapping>,\n primary_key_fields: &'a [String],\n}\n\nimpl<'a, AuthEntry> DependentNodeLabelAnalyzer<'a, AuthEntry> {\n fn new(\n conn: &'a spec::AuthEntryReference,\n rel_end_spec: &'a NodeFromFieldsSpec,\n primary_key_fields_map: &'a HashMap<&'a GraphElementType, &'a [String]>,\n ) -> Result {\n let graph_elem_type = GraphElementType {\n connection: conn.clone(),\n typ: ElementType::Node(rel_end_spec.label.clone()),\n };\n let primary_key_fields = primary_key_fields_map\n .get(&graph_elem_type)\n .ok_or_else(invariance_violation)?;\n Ok(Self {\n graph_elem_type,\n fields: IndexMap::new(),\n remaining_fields: rel_end_spec\n .fields\n .iter()\n .map(|f| (f.source.as_str(), f))\n .collect(),\n primary_key_fields,\n })\n }\n\n fn process_field(&mut self, field_idx: usize, field_schema: &schema::FieldSchema) -> bool {\n let field_mapping = match self.remaining_fields.remove(field_schema.name.as_str()) {\n Some(field_mapping) => field_mapping,\n None => return false,\n };\n self.fields.insert(\n field_mapping.get_target().clone(),\n (field_idx, field_schema.value_type.clone()),\n );\n true\n }\n\n fn build(\n self,\n schema_builders: &mut HashMap, GraphElementSchemaBuilder>,\n ) -> Result<(GraphElementType, GraphElementInputFieldsIdx)> {\n if !self.remaining_fields.is_empty() {\n anyhow::bail!(\n \"Fields not mapped for {}: {}\",\n self.graph_elem_type,\n self.remaining_fields.keys().join(\", \")\n );\n }\n\n let (mut key_fields, value_fields): (Vec<_>, Vec<_>) = self\n .fields\n .into_iter()\n .map(|(field_name, (idx, typ))| (idx, FieldSchema::new(field_name, typ)))\n .partition(|(_, f)| self.primary_key_fields.contains(&f.name));\n if key_fields.len() != self.primary_key_fields.len() {\n bail!(\n \"Primary key fields 
number mismatch: {} vs {}\",\n key_fields.iter().map(|(_, f)| &f.name).join(\", \"),\n self.primary_key_fields.iter().join(\", \")\n );\n }\n key_fields.sort_by_key(|(_, f)| {\n self.primary_key_fields\n .iter()\n .position(|k| k == &f.name)\n .unwrap()\n });\n\n let fields_idx = schema_builders\n .entry(self.graph_elem_type.clone())\n .or_insert_with(|| GraphElementSchemaBuilder::new(self.graph_elem_type.typ.clone()))\n .merge(key_fields, value_fields)?;\n Ok((self.graph_elem_type, fields_idx))\n }\n}\n\npub struct DataCollectionGraphMappingInput<'a, AuthEntry> {\n pub auth_ref: &'a spec::AuthEntryReference,\n pub mapping: &'a GraphElementMapping,\n pub index_options: &'a spec::IndexOptions,\n\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n}\n\npub fn analyze_graph_mappings<'a, AuthEntry: 'a>(\n data_coll_inputs: impl Iterator>,\n declarations: impl Iterator<\n Item = (\n &'a spec::AuthEntryReference,\n &'a GraphDeclaration,\n ),\n >,\n) -> Result<(Vec, Vec>)> {\n let data_coll_inputs: Vec<_> = data_coll_inputs.collect();\n let decls: Vec<_> = declarations.collect();\n\n // 1a. Prepare graph element types\n let graph_elem_types = data_coll_inputs\n .iter()\n .map(|d| GraphElementType {\n connection: d.auth_ref.clone(),\n typ: ElementType::from_mapping_spec(d.mapping),\n })\n .collect::>();\n let decl_graph_elem_types = decls\n .iter()\n .map(|(auth_ref, decl)| GraphElementType {\n connection: (*auth_ref).clone(),\n typ: ElementType::Node(decl.nodes_label.clone()),\n })\n .collect::>();\n\n // 1b. 
Prepare primary key fields map\n let primary_key_fields_map: HashMap<&GraphElementType, &[spec::FieldName]> =\n std::iter::zip(data_coll_inputs.iter(), graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| {\n (\n graph_elem_type,\n data_coll_input.index_options.primary_key_fields(),\n )\n })\n .chain(\n std::iter::zip(decl_graph_elem_types.iter(), decls.iter()).map(\n |(graph_elem_type, (_, decl))| {\n (graph_elem_type, decl.index_options.primary_key_fields())\n },\n ),\n )\n .map(|(graph_elem_type, primary_key_fields)| {\n Ok((\n graph_elem_type,\n primary_key_fields.with_context(|| {\n format!(\"Primary key fields are not set for {graph_elem_type}\")\n })?,\n ))\n })\n .collect::>()?;\n\n // 2. Analyze data collection graph mappings and build target schema\n let mut node_schema_builders =\n HashMap::, GraphElementSchemaBuilder>::new();\n struct RelationshipProcessedInfo {\n rel_schema: GraphElementSchema,\n source_typ: GraphElementType,\n source_fields_idx: GraphElementInputFieldsIdx,\n target_typ: GraphElementType,\n target_fields_idx: GraphElementInputFieldsIdx,\n }\n struct DataCollectionProcessedInfo {\n value_input_fields_idx: Vec,\n rel_specific: Option>,\n }\n let data_collection_processed_info = std::iter::zip(data_coll_inputs, graph_elem_types.iter())\n .map(|(data_coll_input, graph_elem_type)| -> Result<_> {\n let processed_info = match data_coll_input.mapping {\n GraphElementMapping::Node(_) => {\n let input_fields_idx = node_schema_builders\n .entry(graph_elem_type.clone())\n .or_insert_with_key(|graph_elem| {\n GraphElementSchemaBuilder::new(graph_elem.typ.clone())\n })\n .merge(\n data_coll_input\n .key_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n data_coll_input\n .value_fields_schema\n .into_iter()\n .enumerate()\n .collect(),\n )?;\n\n if !(0..input_fields_idx.key.len()).eq(input_fields_idx.key.into_iter()) {\n return Err(invariance_violation());\n }\n DataCollectionProcessedInfo {\n value_input_fields_idx: 
input_fields_idx.value,\n rel_specific: None,\n }\n }\n GraphElementMapping::Relationship(rel_spec) => {\n let mut src_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.source,\n &primary_key_fields_map,\n )?;\n let mut tgt_analyzer = DependentNodeLabelAnalyzer::new(\n data_coll_input.auth_ref,\n &rel_spec.target,\n &primary_key_fields_map,\n )?;\n\n let mut value_fields_schema = vec![];\n let mut value_input_fields_idx = vec![];\n for (field_idx, field_schema) in\n data_coll_input.value_fields_schema.into_iter().enumerate()\n {\n if !src_analyzer.process_field(field_idx, &field_schema)\n && !tgt_analyzer.process_field(field_idx, &field_schema)\n {\n value_fields_schema.push(field_schema.clone());\n value_input_fields_idx.push(field_idx);\n }\n }\n\n let rel_schema = GraphElementSchema {\n elem_type: graph_elem_type.typ.clone(),\n key_fields: data_coll_input.key_fields_schema,\n value_fields: value_fields_schema,\n };\n let (source_typ, source_fields_idx) =\n src_analyzer.build(&mut node_schema_builders)?;\n let (target_typ, target_fields_idx) =\n tgt_analyzer.build(&mut node_schema_builders)?;\n DataCollectionProcessedInfo {\n value_input_fields_idx,\n rel_specific: Some(RelationshipProcessedInfo {\n rel_schema,\n source_typ,\n source_fields_idx,\n target_typ,\n target_fields_idx,\n }),\n }\n }\n };\n Ok(processed_info)\n })\n .collect::>>()?;\n\n let node_schemas: HashMap, Arc> =\n node_schema_builders\n .into_iter()\n .map(|(graph_elem_type, schema_builder)| {\n Ok((graph_elem_type, Arc::new(schema_builder.build_schema()?)))\n })\n .collect::>()?;\n\n // 3. 
Build output\n let analyzed_data_colls: Vec =\n std::iter::zip(data_collection_processed_info, graph_elem_types.iter())\n .map(|(processed_info, graph_elem_type)| {\n let result = match processed_info.rel_specific {\n // Node\n None => AnalyzedDataCollection {\n schema: node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone(),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: None,\n },\n // Relationship\n Some(rel_info) => AnalyzedDataCollection {\n schema: Arc::new(rel_info.rel_schema),\n value_fields_input_idx: processed_info.value_input_fields_idx,\n rel: Some(AnalyzedRelationshipInfo {\n source: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.source_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.source_fields_idx,\n },\n target: AnalyzedGraphElementFieldMapping {\n schema: node_schemas\n .get(&rel_info.target_typ)\n .ok_or_else(invariance_violation)?\n .clone(),\n fields_input_idx: rel_info.target_fields_idx,\n },\n }),\n },\n };\n Ok(result)\n })\n .collect::>()?;\n let decl_schemas: Vec> = decl_graph_elem_types\n .iter()\n .map(|graph_elem_type| {\n Ok(node_schemas\n .get(graph_elem_type)\n .ok_or_else(invariance_violation)?\n .clone())\n })\n .collect::>()?;\n Ok((analyzed_data_colls, decl_schemas))\n}\n"], ["/cocoindex/src/ops/interface.rs", "use std::time::SystemTime;\n\nuse crate::base::{schema::*, spec::IndexOptions, value::*};\nuse crate::prelude::*;\nuse crate::setup;\nuse chrono::TimeZone;\nuse serde::Serialize;\n\npub struct FlowInstanceContext {\n pub flow_instance_name: String,\n pub auth_registry: Arc,\n pub py_exec_ctx: Option>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Default)]\npub struct Ordinal(pub Option);\n\nimpl Ordinal {\n pub fn unavailable() -> Self {\n Self(None)\n }\n\n pub fn is_available(&self) -> bool {\n self.0.is_some()\n }\n}\n\nimpl From for Option {\n fn from(val: Ordinal) -> Self {\n val.0\n 
}\n}\n\nimpl TryFrom for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: SystemTime) -> Result {\n let duration = time.duration_since(std::time::UNIX_EPOCH)?;\n Ok(Ordinal(Some(duration.as_micros().try_into()?)))\n }\n}\n\nimpl TryFrom> for Ordinal {\n type Error = anyhow::Error;\n\n fn try_from(time: chrono::DateTime) -> Result {\n Ok(Ordinal(Some(time.timestamp_micros())))\n }\n}\n\npub struct PartialSourceRowMetadata {\n pub key: KeyValue,\n pub ordinal: Option,\n}\n\n#[derive(Debug)]\npub enum SourceValue {\n Existence(FieldValues),\n NonExistence,\n}\n\nimpl SourceValue {\n pub fn is_existent(&self) -> bool {\n matches!(self, Self::Existence(_))\n }\n\n pub fn as_optional(&self) -> Option<&FieldValues> {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n\n pub fn into_optional(self) -> Option {\n match self {\n Self::Existence(value) => Some(value),\n Self::NonExistence => None,\n }\n }\n}\n\npub struct SourceData {\n pub value: SourceValue,\n pub ordinal: Ordinal,\n}\n\npub struct SourceChange {\n pub key: KeyValue,\n\n /// If None, the engine will poll to get the latest existence state and value.\n pub data: Option,\n}\n\npub struct SourceChangeMessage {\n pub changes: Vec,\n pub ack_fn: Option BoxFuture<'static, Result<()>> + Send + Sync>>,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorListOptions {\n pub include_ordinal: bool,\n}\n\n#[derive(Debug, Default)]\npub struct SourceExecutorGetOptions {\n pub include_ordinal: bool,\n pub include_value: bool,\n}\n\n#[derive(Debug)]\npub struct PartialSourceRowData {\n pub value: Option,\n pub ordinal: Option,\n}\n\nimpl TryFrom for SourceData {\n type Error = anyhow::Error;\n\n fn try_from(data: PartialSourceRowData) -> Result {\n Ok(Self {\n value: data\n .value\n .ok_or_else(|| anyhow::anyhow!(\"value is missing\"))?,\n ordinal: data\n .ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is missing\"))?,\n })\n }\n}\n#[async_trait]\npub trait 
SourceExecutor: Send + Sync {\n /// Get the list of keys for the source.\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>>;\n\n // Get the value for the given key.\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result;\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n Ok(None)\n }\n}\n\n#[async_trait]\npub trait SourceFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[async_trait]\npub trait SimpleFunctionExecutor: Send + Sync {\n /// Evaluate the operation.\n async fn evaluate(&self, args: Vec) -> Result;\n\n fn enable_cache(&self) -> bool {\n false\n }\n\n /// Must be Some if `enable_cache` is true.\n /// If it changes, the cache will be invalidated.\n fn behavior_version(&self) -> Option {\n None\n }\n}\n\n#[async_trait]\npub trait SimpleFunctionFactory {\n async fn build(\n self: Arc,\n spec: serde_json::Value,\n input_schema: Vec,\n context: Arc,\n ) -> Result<(\n EnrichedValueType,\n BoxFuture<'static, Result>>,\n )>;\n}\n\n#[derive(Debug)]\npub struct ExportTargetUpsertEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n pub value: FieldValues,\n}\n\n#[derive(Debug)]\npub struct ExportTargetDeleteEntry {\n pub key: KeyValue,\n pub additional_key: serde_json::Value,\n}\n\n#[derive(Debug, Default)]\npub struct ExportTargetMutation {\n pub upserts: Vec,\n pub deletes: Vec,\n}\n\nimpl ExportTargetMutation {\n pub fn is_empty(&self) -> bool {\n self.upserts.is_empty() && self.deletes.is_empty()\n }\n}\n\n#[derive(Debug)]\npub struct ExportTargetMutationWithContext<'ctx, T: ?Sized + Send + Sync> {\n pub mutation: ExportTargetMutation,\n pub export_context: &'ctx T,\n}\n\npub struct ResourceSetupChangeItem<'a> {\n pub key: &'a serde_json::Value,\n pub setup_status: &'a dyn setup::ResourceSetupStatus,\n}\n\n#[derive(Debug, Clone, 
Copy, PartialEq, Eq)]\npub enum SetupStateCompatibility {\n /// The resource is fully compatible with the desired state.\n /// This means the resource can be updated to the desired state without any loss of data.\n Compatible,\n /// The resource is partially compatible with the desired state.\n /// This means data from some existing fields will be lost after applying the setup change.\n /// But at least their key fields of all rows are still preserved.\n PartialCompatible,\n /// The resource needs to be rebuilt. After applying the setup change, all data will be gone.\n NotCompatible,\n}\n\npub struct ExportDataCollectionBuildOutput {\n pub export_context: BoxFuture<'static, Result>>,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n}\n\npub struct ExportDataCollectionSpec {\n pub name: String,\n pub spec: serde_json::Value,\n pub key_fields_schema: Vec,\n pub value_fields_schema: Vec,\n pub index_options: IndexOptions,\n}\n\n#[async_trait]\npub trait ExportTargetFactory: Send + Sync {\n async fn build(\n self: Arc,\n data_collections: Vec,\n declarations: Vec,\n context: Arc,\n ) -> Result<(\n Vec,\n Vec<(serde_json::Value, serde_json::Value)>,\n )>;\n\n /// Will not be called if it's setup by user.\n /// It returns an error if the target only supports setup by user.\n async fn check_setup_status(\n &self,\n key: &serde_json::Value,\n desired_state: Option,\n existing_states: setup::CombinedState,\n context: Arc,\n ) -> Result>;\n\n /// Normalize the key. e.g. the JSON format may change (after code change, e.g. 
new optional field or field ordering), even if the underlying value is not changed.\n /// This should always return the canonical serialized form.\n fn normalize_setup_key(&self, key: &serde_json::Value) -> Result;\n\n fn check_state_compatibility(\n &self,\n desired_state: &serde_json::Value,\n existing_state: &serde_json::Value,\n ) -> Result;\n\n fn describe_resource(&self, key: &serde_json::Value) -> Result;\n\n fn extract_additional_key(\n &self,\n key: &KeyValue,\n value: &FieldValues,\n export_context: &(dyn Any + Send + Sync),\n ) -> Result;\n\n async fn apply_mutation(\n &self,\n mutations: Vec>,\n ) -> Result<()>;\n\n async fn apply_setup_changes(\n &self,\n setup_status: Vec>,\n context: Arc,\n ) -> Result<()>;\n}\n\n#[derive(Clone)]\npub enum ExecutorFactory {\n Source(Arc),\n SimpleFunction(Arc),\n ExportTarget(Arc),\n}\n"], ["/cocoindex/src/builder/exec_ctx.rs", "use crate::prelude::*;\n\nuse crate::execution::db_tracking_setup;\nuse crate::ops::get_executor_factory;\nuse crate::ops::interface::SetupStateCompatibility;\n\npub struct ImportOpExecutionContext {\n pub source_id: i32,\n}\n\npub struct ExportOpExecutionContext {\n pub target_id: i32,\n}\n\npub struct FlowSetupExecutionContext {\n pub setup_state: setup::FlowSetupState,\n pub import_ops: Vec,\n pub export_ops: Vec,\n}\n\npub struct AnalyzedTargetSetupState {\n pub target_kind: String,\n pub setup_key: serde_json::Value,\n pub desired_setup_state: serde_json::Value,\n pub setup_by_user: bool,\n}\n\npub struct AnalyzedSetupState {\n pub targets: Vec,\n pub declarations: Vec,\n}\n\nfn build_import_op_exec_ctx(\n import_field_name: &spec::FieldName,\n import_op_output_type: &schema::EnrichedValueType,\n existing_source_states: Option<&Vec<&setup::SourceSetupState>>,\n metadata: &mut setup::FlowSetupMetadata,\n) -> Result {\n let key_schema_no_attrs = import_op_output_type\n .typ\n .key_type()\n .ok_or_else(|| api_error!(\"Source must produce a type with key\"))?\n .typ\n .without_attrs();\n\n 
let existing_source_ids = existing_source_states\n .iter()\n .flat_map(|v| v.iter())\n .filter_map(|state| {\n if state.key_schema == key_schema_no_attrs {\n Some(state.source_id)\n } else {\n None\n }\n })\n .collect::>();\n let source_id = if existing_source_ids.len() == 1 {\n existing_source_ids.into_iter().next().unwrap()\n } else {\n if existing_source_ids.len() > 1 {\n warn!(\"Multiple source states with the same key schema found\");\n }\n metadata.last_source_id += 1;\n metadata.last_source_id\n };\n metadata.sources.insert(\n import_field_name.clone(),\n setup::SourceSetupState {\n source_id,\n key_schema: key_schema_no_attrs,\n },\n );\n Ok(ImportOpExecutionContext { source_id })\n}\n\nfn build_target_id(\n analyzed_target_ss: &AnalyzedTargetSetupState,\n existing_target_states: &HashMap<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>,\n flow_setup_state: &mut setup::FlowSetupState,\n) -> Result {\n let interface::ExecutorFactory::ExportTarget(target_factory) =\n get_executor_factory(&analyzed_target_ss.target_kind)?\n else {\n api_bail!(\n \"`{}` is not a export target op\",\n analyzed_target_ss.target_kind\n )\n };\n\n let resource_id = setup::ResourceIdentifier {\n key: analyzed_target_ss.setup_key.clone(),\n target_kind: analyzed_target_ss.target_kind.clone(),\n };\n let existing_target_states = existing_target_states.get(&resource_id);\n let mut compatible_target_ids = HashSet::>::new();\n let mut reusable_schema_version_ids = HashSet::>::new();\n for existing_state in existing_target_states.iter().flat_map(|v| v.iter()) {\n let compatibility =\n if analyzed_target_ss.setup_by_user == existing_state.common.setup_by_user {\n target_factory.check_state_compatibility(\n &analyzed_target_ss.desired_setup_state,\n &existing_state.state,\n )?\n } else {\n SetupStateCompatibility::NotCompatible\n };\n let compatible_target_id = if compatibility != SetupStateCompatibility::NotCompatible {\n reusable_schema_version_ids.insert(\n (compatibility == 
SetupStateCompatibility::Compatible)\n .then_some(existing_state.common.schema_version_id),\n );\n Some(existing_state.common.target_id)\n } else {\n None\n };\n compatible_target_ids.insert(compatible_target_id);\n }\n\n let target_id = if compatible_target_ids.len() == 1 {\n compatible_target_ids.into_iter().next().flatten()\n } else {\n if compatible_target_ids.len() > 1 {\n warn!(\"Multiple target states with the same key schema found\");\n }\n None\n };\n let target_id = target_id.unwrap_or_else(|| {\n flow_setup_state.metadata.last_target_id += 1;\n flow_setup_state.metadata.last_target_id\n });\n let max_schema_version_id = existing_target_states\n .iter()\n .flat_map(|v| v.iter())\n .map(|s| s.common.max_schema_version_id)\n .max()\n .unwrap_or(0);\n let schema_version_id = if reusable_schema_version_ids.len() == 1 {\n reusable_schema_version_ids\n .into_iter()\n .next()\n .unwrap()\n .unwrap_or(max_schema_version_id + 1)\n } else {\n max_schema_version_id + 1\n };\n match flow_setup_state.targets.entry(resource_id) {\n indexmap::map::Entry::Occupied(entry) => {\n api_bail!(\n \"Target resource already exists: kind = {}, key = {}\",\n entry.key().target_kind,\n entry.key().key\n );\n }\n indexmap::map::Entry::Vacant(entry) => {\n entry.insert(setup::TargetSetupState {\n common: setup::TargetSetupStateCommon {\n target_id,\n schema_version_id,\n max_schema_version_id: max_schema_version_id.max(schema_version_id),\n setup_by_user: analyzed_target_ss.setup_by_user,\n },\n state: analyzed_target_ss.desired_setup_state.clone(),\n });\n }\n }\n Ok(target_id)\n}\n\npub fn build_flow_setup_execution_context(\n flow_inst: &spec::FlowInstanceSpec,\n data_schema: &schema::FlowSchema,\n analyzed_ss: &AnalyzedSetupState,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n) -> Result {\n let existing_metadata_versions = || {\n existing_flow_ss\n .iter()\n .flat_map(|flow_ss| flow_ss.metadata.possible_versions())\n };\n\n let mut source_states_by_name = HashMap::<&str, 
Vec<&setup::SourceSetupState>>::new();\n for metadata_version in existing_metadata_versions() {\n for (source_name, state) in metadata_version.sources.iter() {\n source_states_by_name\n .entry(source_name.as_str())\n .or_default()\n .push(state);\n }\n }\n\n let mut target_states_by_name_type =\n HashMap::<&setup::ResourceIdentifier, Vec<&setup::TargetSetupState>>::new();\n for metadata_version in existing_flow_ss.iter() {\n for (resource_id, target) in metadata_version.targets.iter() {\n target_states_by_name_type\n .entry(resource_id)\n .or_default()\n .extend(target.possible_versions());\n }\n }\n\n let mut setup_state = setup::FlowSetupState:: {\n seen_flow_metadata_version: existing_flow_ss\n .and_then(|flow_ss| flow_ss.seen_flow_metadata_version),\n metadata: setup::FlowSetupMetadata {\n last_source_id: existing_metadata_versions()\n .map(|metadata| metadata.last_source_id)\n .max()\n .unwrap_or(0),\n last_target_id: existing_metadata_versions()\n .map(|metadata| metadata.last_target_id)\n .max()\n .unwrap_or(0),\n sources: BTreeMap::new(),\n },\n tracking_table: db_tracking_setup::TrackingTableSetupState {\n table_name: existing_flow_ss\n .and_then(|flow_ss| {\n flow_ss\n .tracking_table\n .current\n .as_ref()\n .map(|v| v.table_name.clone())\n })\n .unwrap_or_else(|| db_tracking_setup::default_tracking_table_name(&flow_inst.name)),\n version_id: db_tracking_setup::CURRENT_TRACKING_TABLE_VERSION,\n },\n targets: IndexMap::new(),\n };\n\n let import_op_exec_ctx = flow_inst\n .import_ops\n .iter()\n .map(|import_op| {\n let output_type = data_schema\n .root_op_scope\n .op_output_types\n .get(&import_op.name)\n .ok_or_else(invariance_violation)?;\n build_import_op_exec_ctx(\n &import_op.name,\n output_type,\n source_states_by_name.get(&import_op.name.as_str()),\n &mut setup_state.metadata,\n )\n })\n .collect::>>()?;\n\n let export_op_exec_ctx = analyzed_ss\n .targets\n .iter()\n .map(|analyzed_target_ss| {\n let target_id = build_target_id(\n 
analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n Ok(ExportOpExecutionContext { target_id })\n })\n .collect::>>()?;\n\n for analyzed_target_ss in analyzed_ss.declarations.iter() {\n build_target_id(\n analyzed_target_ss,\n &target_states_by_name_type,\n &mut setup_state,\n )?;\n }\n\n Ok(FlowSetupExecutionContext {\n setup_state,\n import_ops: import_op_exec_ctx,\n export_ops: export_op_exec_ctx,\n })\n}\n"], ["/cocoindex/src/execution/evaluator.rs", "use crate::prelude::*;\n\nuse anyhow::{Context, Ok};\nuse futures::future::try_join_all;\n\nuse crate::base::value::EstimatedByteSize;\nuse crate::builder::{AnalyzedTransientFlow, plan::*};\nuse crate::py::IntoPyResult;\nuse crate::{\n base::{schema, value},\n utils::immutable::RefList,\n};\n\nuse super::memoization::{EvaluationMemory, EvaluationMemoryOptions, evaluate_with_cell};\n\n#[derive(Debug)]\npub struct ScopeValueBuilder {\n // TODO: Share the same lock for values produced in the same execution scope, for stricter atomicity.\n pub fields: Vec>>,\n}\n\nimpl value::EstimatedByteSize for ScopeValueBuilder {\n fn estimated_detached_byte_size(&self) -> usize {\n self.fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }\n}\n\nimpl From<&ScopeValueBuilder> for value::ScopeValue {\n fn from(val: &ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .iter()\n .map(|f| value::Value::from_alternative_ref(f.get().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl From for value::ScopeValue {\n fn from(val: ScopeValueBuilder) -> Self {\n value::ScopeValue(value::FieldValues {\n fields: val\n .fields\n .into_iter()\n .map(|f| value::Value::from_alternative(f.into_inner().unwrap()))\n .collect(),\n })\n }\n}\n\nimpl ScopeValueBuilder {\n fn new(num_fields: usize) -> Self {\n let mut fields = Vec::with_capacity(num_fields);\n fields.resize_with(num_fields, OnceLock::new);\n Self { fields }\n }\n\n fn augmented_from(source: 
&value::ScopeValue, schema: &schema::TableSchema) -> Result {\n let val_index_base = if schema.has_key() { 1 } else { 0 };\n let len = schema.row.fields.len() - val_index_base;\n\n let mut builder = Self::new(len);\n\n let value::ScopeValue(source_fields) = source;\n for ((v, t), r) in source_fields\n .fields\n .iter()\n .zip(schema.row.fields[val_index_base..(val_index_base + len)].iter())\n .zip(&mut builder.fields)\n {\n r.set(augmented_value(v, &t.value_type.typ)?)\n .into_py_result()?;\n }\n Ok(builder)\n }\n}\n\nfn augmented_value(\n val: &value::Value,\n val_type: &schema::ValueType,\n) -> Result> {\n let value = match (val, val_type) {\n (value::Value::Null, _) => value::Value::Null,\n (value::Value::Basic(v), _) => value::Value::Basic(v.clone()),\n (value::Value::Struct(v), schema::ValueType::Struct(t)) => {\n value::Value::Struct(value::FieldValues {\n fields: v\n .fields\n .iter()\n .enumerate()\n .map(|(i, v)| augmented_value(v, &t.fields[i].value_type.typ))\n .collect::>>()?,\n })\n }\n (value::Value::UTable(v), schema::ValueType::Table(t)) => value::Value::UTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (value::Value::KTable(v), schema::ValueType::Table(t)) => value::Value::KTable(\n v.iter()\n .map(|(k, v)| Ok((k.clone(), ScopeValueBuilder::augmented_from(v, t)?)))\n .collect::>>()?,\n ),\n (value::Value::LTable(v), schema::ValueType::Table(t)) => value::Value::LTable(\n v.iter()\n .map(|v| ScopeValueBuilder::augmented_from(v, t))\n .collect::>>()?,\n ),\n (val, _) => bail!(\"Value kind doesn't match the type {val_type}: {val:?}\"),\n };\n Ok(value)\n}\n\nenum ScopeKey<'a> {\n /// For root struct and UTable.\n None,\n /// For KTable row.\n MapKey(&'a value::KeyValue),\n /// For LTable row.\n ListIndex(usize),\n}\n\nimpl<'a> ScopeKey<'a> {\n pub fn key(&self) -> Option> {\n match self {\n ScopeKey::None => None,\n ScopeKey::MapKey(k) => Some(Cow::Borrowed(k)),\n ScopeKey::ListIndex(i) => 
Some(Cow::Owned(value::KeyValue::Int64(*i as i64))),\n }\n }\n\n pub fn value_field_index_base(&self) -> u32 {\n match *self {\n ScopeKey::None => 0,\n ScopeKey::MapKey(_) => 1,\n ScopeKey::ListIndex(_) => 0,\n }\n }\n}\n\nimpl std::fmt::Display for ScopeKey<'_> {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ScopeKey::None => write!(f, \"()\"),\n ScopeKey::MapKey(k) => write!(f, \"{{{k}}}\"),\n ScopeKey::ListIndex(i) => write!(f, \"[{i}]\"),\n }\n }\n}\n\nstruct ScopeEntry<'a> {\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n collected_values: Vec>>,\n}\n\nimpl<'a> ScopeEntry<'a> {\n fn new(\n key: ScopeKey<'a>,\n value: &'a ScopeValueBuilder,\n schema: &'a schema::StructSchema,\n analyzed_op_scope: &AnalyzedOpScope,\n ) -> Self {\n let mut collected_values = Vec::with_capacity(analyzed_op_scope.collector_len);\n collected_values.resize_with(analyzed_op_scope.collector_len, Default::default);\n\n Self {\n key,\n value,\n schema,\n collected_values,\n }\n }\n\n fn get_local_field_schema<'b>(\n schema: &'b schema::StructSchema,\n indices: &[u32],\n ) -> Result<&'b schema::FieldSchema> {\n let field_idx = indices[0] as usize;\n let field_schema = &schema.fields[field_idx];\n let result = if indices.len() == 1 {\n field_schema\n } else {\n let struct_field_schema = match &field_schema.value_type.typ {\n schema::ValueType::Struct(s) => s,\n _ => bail!(\"Expect struct field\"),\n };\n Self::get_local_field_schema(struct_field_schema, &indices[1..])?\n };\n Ok(result)\n }\n\n fn get_local_key_field<'b>(\n key_val: &'b value::KeyValue,\n indices: &'_ [u32],\n ) -> &'b value::KeyValue {\n if indices.is_empty() {\n key_val\n } else if let value::KeyValue::Struct(fields) = key_val {\n Self::get_local_key_field(&fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_local_field<'b>(\n val: &'b value::Value,\n indices: 
&'_ [u32],\n ) -> &'b value::Value {\n if indices.is_empty() {\n val\n } else if let value::Value::Struct(fields) = val {\n Self::get_local_field(&fields.fields[indices[0] as usize], &indices[1..])\n } else {\n panic!(\"Only struct can be accessed by sub field\");\n }\n }\n\n fn get_value_field_builder(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> &value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n Self::get_local_field(val, &field_ref.fields_idx[1..])\n }\n\n fn get_field(&self, field_ref: &AnalyzedLocalFieldReference) -> value::Value {\n let first_index = field_ref.fields_idx[0];\n let index_base = self.key.value_field_index_base();\n if first_index < index_base {\n let key_val = self.key.key().unwrap().into_owned();\n let key_part = Self::get_local_key_field(&key_val, &field_ref.fields_idx[1..]);\n key_part.clone().into()\n } else {\n let val = self.value.fields[(first_index - index_base) as usize]\n .get()\n .unwrap();\n let val_part = Self::get_local_field(val, &field_ref.fields_idx[1..]);\n value::Value::from_alternative_ref(val_part)\n }\n }\n\n fn get_field_schema(\n &self,\n field_ref: &AnalyzedLocalFieldReference,\n ) -> Result<&schema::FieldSchema> {\n Ok(Self::get_local_field_schema(\n self.schema,\n &field_ref.fields_idx,\n )?)\n }\n\n fn define_field_w_builder(\n &self,\n output_field: &AnalyzedOpOutput,\n val: value::Value,\n ) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n let index_base = self.key.value_field_index_base() as usize;\n self.value.fields[field_index - index_base].set(val).map_err(|_| {\n anyhow!(\"Field {field_index} for scope is already set, violating single-definition rule.\")\n })?;\n Ok(())\n }\n\n fn define_field(&self, output_field: &AnalyzedOpOutput, val: &value::Value) -> Result<()> {\n let field_index = output_field.field_idx as usize;\n 
let field_schema = &self.schema.fields[field_index];\n let val = augmented_value(val, &field_schema.value_type.typ)?;\n self.define_field_w_builder(output_field, val)?;\n Ok(())\n }\n}\n\nfn assemble_value(\n value_mapping: &AnalyzedValueMapping,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n) -> value::Value {\n match value_mapping {\n AnalyzedValueMapping::Constant { value } => value.clone(),\n AnalyzedValueMapping::Field(field_ref) => scoped_entries\n .headn(field_ref.scope_up_level as usize)\n .unwrap()\n .get_field(&field_ref.local),\n AnalyzedValueMapping::Struct(mapping) => {\n let fields = mapping\n .fields\n .iter()\n .map(|f| assemble_value(f, scoped_entries))\n .collect();\n value::Value::Struct(value::FieldValues { fields })\n }\n }\n}\n\nfn assemble_input_values<'a>(\n value_mappings: &'a [AnalyzedValueMapping],\n scoped_entries: RefList<'a, &ScopeEntry<'a>>,\n) -> impl Iterator + 'a {\n value_mappings\n .iter()\n .map(move |value_mapping| assemble_value(value_mapping, scoped_entries))\n}\n\nasync fn evaluate_child_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n child_scope_entry: ScopeEntry<'_>,\n concurrency_controller: &concur_control::ConcurrencyController,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let _permit = concurrency_controller\n .acquire(Some(|| {\n child_scope_entry\n .value\n .fields\n .iter()\n .map(|f| f.get().map_or(0, |v| v.estimated_byte_size()))\n .sum()\n }))\n .await?;\n evaluate_op_scope(op_scope, scoped_entries.prepend(&child_scope_entry), memory)\n .await\n .with_context(|| {\n format!(\n \"Evaluating in scope with key {}\",\n match child_scope_entry.key.key() {\n Some(k) => k.to_string(),\n None => \"()\".to_string(),\n }\n )\n })\n}\n\nasync fn evaluate_op_scope(\n op_scope: &AnalyzedOpScope,\n scoped_entries: RefList<'_, &ScopeEntry<'_>>,\n memory: &EvaluationMemory,\n) -> Result<()> {\n let head_scope = *scoped_entries.head().unwrap();\n for reactive_op in 
op_scope.reactive_ops.iter() {\n match reactive_op {\n AnalyzedReactiveOp::Transform(op) => {\n let mut input_values = Vec::with_capacity(op.inputs.len());\n input_values\n .extend(assemble_input_values(&op.inputs, scoped_entries).collect::>());\n let output_value_cell = memory.get_cache_entry(\n || {\n Ok(op\n .function_exec_info\n .fingerprinter\n .clone()\n .with(&input_values)?\n .into_fingerprint())\n },\n &op.function_exec_info.output_type,\n /*ttl=*/ None,\n )?;\n let output_value = evaluate_with_cell(output_value_cell.as_ref(), move || {\n op.executor.evaluate(input_values)\n })\n .await\n .with_context(|| format!(\"Evaluating Transform op `{}`\", op.name,))?;\n head_scope.define_field(&op.output, &output_value)?;\n }\n\n AnalyzedReactiveOp::ForEach(op) => {\n let target_field_schema = head_scope.get_field_schema(&op.local_field_ref)?;\n let table_schema = match &target_field_schema.value_type.typ {\n schema::ValueType::Table(cs) => cs,\n _ => bail!(\"Expect target field to be a table\"),\n };\n\n let target_field = head_scope.get_value_field_builder(&op.local_field_ref);\n let task_futs = match target_field {\n value::Value::UTable(v) => v\n .iter()\n .map(|item| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::None,\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::KTable(v) => v\n .iter()\n .map(|(k, v)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::MapKey(k),\n v,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n value::Value::LTable(v) => v\n .iter()\n .enumerate()\n .map(|(i, item)| {\n evaluate_child_op_scope(\n &op.op_scope,\n scoped_entries,\n ScopeEntry::new(\n ScopeKey::ListIndex(i),\n item,\n &table_schema.row,\n &op.op_scope,\n ),\n &op.concurrency_controller,\n memory,\n )\n })\n .collect::>(),\n _ => {\n 
bail!(\"Target field type is expected to be a table\");\n }\n };\n try_join_all(task_futs)\n .await\n .with_context(|| format!(\"Evaluating ForEach op `{}`\", op.name,))?;\n }\n\n AnalyzedReactiveOp::Collect(op) => {\n let mut field_values = Vec::with_capacity(\n op.input.fields.len() + if op.has_auto_uuid_field { 1 } else { 0 },\n );\n let field_values_iter = assemble_input_values(&op.input.fields, scoped_entries);\n if op.has_auto_uuid_field {\n field_values.push(value::Value::Null);\n field_values.extend(field_values_iter);\n let uuid = memory.next_uuid(\n op.fingerprinter\n .clone()\n .with(&field_values[1..])?\n .into_fingerprint(),\n )?;\n field_values[0] = value::Value::Basic(value::BasicValue::Uuid(uuid));\n } else {\n field_values.extend(field_values_iter);\n };\n let collector_entry = scoped_entries\n .headn(op.collector_ref.scope_up_level as usize)\n .ok_or_else(|| anyhow::anyhow!(\"Collector level out of bound\"))?;\n {\n let mut collected_records = collector_entry.collected_values\n [op.collector_ref.local.collector_idx as usize]\n .lock()\n .unwrap();\n collected_records.push(value::FieldValues {\n fields: field_values,\n });\n }\n }\n }\n }\n Ok(())\n}\n\npub struct SourceRowEvaluationContext<'a> {\n pub plan: &'a ExecutionPlan,\n pub import_op: &'a AnalyzedImportOp,\n pub schema: &'a schema::FlowSchema,\n pub key: &'a value::KeyValue,\n pub import_op_idx: usize,\n}\n\n#[derive(Debug)]\npub struct EvaluateSourceEntryOutput {\n pub data_scope: ScopeValueBuilder,\n pub collected_values: Vec>,\n}\n\npub async fn evaluate_source_entry(\n src_eval_ctx: &SourceRowEvaluationContext<'_>,\n source_value: value::FieldValues,\n memory: &EvaluationMemory,\n) -> Result {\n let _permit = src_eval_ctx\n .import_op\n .concurrency_controller\n .acquire_bytes_with_reservation(|| source_value.estimated_byte_size())\n .await?;\n let root_schema = &src_eval_ctx.schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let 
root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &src_eval_ctx.plan.op_scope,\n );\n\n let table_schema = match &root_schema.fields[src_eval_ctx.import_op.output.field_idx as usize]\n .value_type\n .typ\n {\n schema::ValueType::Table(cs) => cs,\n _ => {\n bail!(\"Expect source output to be a table\")\n }\n };\n\n let scope_value =\n ScopeValueBuilder::augmented_from(&value::ScopeValue(source_value), table_schema)?;\n root_scope_entry.define_field_w_builder(\n &src_eval_ctx.import_op.output,\n value::Value::KTable(BTreeMap::from([(src_eval_ctx.key.clone(), scope_value)])),\n )?;\n\n evaluate_op_scope(\n &src_eval_ctx.plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n memory,\n )\n .await?;\n let collected_values = root_scope_entry\n .collected_values\n .into_iter()\n .map(|v| v.into_inner().unwrap())\n .collect::>();\n Ok(EvaluateSourceEntryOutput {\n data_scope: root_scope_value,\n collected_values,\n })\n}\n\npub async fn evaluate_transient_flow(\n flow: &AnalyzedTransientFlow,\n input_values: &Vec,\n) -> Result {\n let root_schema = &flow.data_schema.schema;\n let root_scope_value = ScopeValueBuilder::new(root_schema.fields.len());\n let root_scope_entry = ScopeEntry::new(\n ScopeKey::None,\n &root_scope_value,\n root_schema,\n &flow.execution_plan.op_scope,\n );\n\n if input_values.len() != flow.execution_plan.input_fields.len() {\n bail!(\n \"Input values length mismatch: expect {}, got {}\",\n flow.execution_plan.input_fields.len(),\n input_values.len()\n );\n }\n for (field, value) in flow.execution_plan.input_fields.iter().zip(input_values) {\n root_scope_entry.define_field(field, value)?;\n }\n let eval_memory = EvaluationMemory::new(\n chrono::Utc::now(),\n None,\n EvaluationMemoryOptions {\n enable_cache: false,\n evaluation_only: true,\n },\n );\n evaluate_op_scope(\n &flow.execution_plan.op_scope,\n RefList::Nil.prepend(&root_scope_entry),\n &eval_memory,\n )\n .await?;\n let output_value = 
assemble_value(\n &flow.execution_plan.output_value,\n RefList::Nil.prepend(&root_scope_entry),\n );\n Ok(output_value)\n}\n"], ["/cocoindex/src/base/schema.rs", "use crate::prelude::*;\n\nuse super::spec::*;\nuse crate::builder::plan::AnalyzedValueMapping;\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct VectorTypeSchema {\n pub element_type: Box,\n pub dimension: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct UnionTypeSchema {\n pub types: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum BasicValueType {\n /// A sequence of bytes in binary.\n Bytes,\n\n /// String encoded in UTF-8.\n Str,\n\n /// A boolean value.\n Bool,\n\n /// 64-bit integer.\n Int64,\n\n /// 32-bit floating point number.\n Float32,\n\n /// 64-bit floating point number.\n Float64,\n\n /// A range, with a start offset and a length.\n Range,\n\n /// A UUID.\n Uuid,\n\n /// Date (without time within the current day).\n Date,\n\n /// Time of the day.\n Time,\n\n /// Local date and time, without timezone.\n LocalDateTime,\n\n /// Date and time with timezone.\n OffsetDateTime,\n\n /// A time duration.\n TimeDelta,\n\n /// A JSON value.\n Json,\n\n /// A vector of values (usually numbers, for embeddings).\n Vector(VectorTypeSchema),\n\n /// A union\n Union(UnionTypeSchema),\n}\n\nimpl std::fmt::Display for BasicValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n BasicValueType::Bytes => write!(f, \"Bytes\"),\n BasicValueType::Str => write!(f, \"Str\"),\n BasicValueType::Bool => write!(f, \"Bool\"),\n BasicValueType::Int64 => write!(f, \"Int64\"),\n BasicValueType::Float32 => write!(f, \"Float32\"),\n BasicValueType::Float64 => write!(f, \"Float64\"),\n BasicValueType::Range => write!(f, \"Range\"),\n BasicValueType::Uuid => write!(f, \"Uuid\"),\n BasicValueType::Date => write!(f, \"Date\"),\n BasicValueType::Time => 
write!(f, \"Time\"),\n BasicValueType::LocalDateTime => write!(f, \"LocalDateTime\"),\n BasicValueType::OffsetDateTime => write!(f, \"OffsetDateTime\"),\n BasicValueType::TimeDelta => write!(f, \"TimeDelta\"),\n BasicValueType::Json => write!(f, \"Json\"),\n BasicValueType::Vector(s) => {\n write!(f, \"Vector[{}\", s.element_type)?;\n if let Some(dimension) = s.dimension {\n write!(f, \", {dimension}\")?;\n }\n write!(f, \"]\")\n }\n BasicValueType::Union(s) => {\n write!(f, \"Union[\")?;\n for (i, typ) in s.types.iter().enumerate() {\n if i > 0 {\n // Add type delimiter\n write!(f, \" | \")?;\n }\n write!(f, \"{typ}\")?;\n }\n write!(f, \"]\")\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct StructSchema {\n pub fields: Arc>,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub description: Option>,\n}\n\nimpl StructSchema {\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: Arc::new(self.fields.iter().map(|f| f.without_attrs()).collect()),\n description: None,\n }\n }\n}\n\nimpl std::fmt::Display for StructSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Struct(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\n#[allow(clippy::enum_variant_names)]\npub enum TableKind {\n /// An table with unordered rows, without key.\n UTable,\n /// A table's first field is the key.\n #[serde(alias = \"Table\")]\n KTable,\n /// A table whose rows orders are preserved.\n #[serde(alias = \"List\")]\n LTable,\n}\n\nimpl std::fmt::Display for TableKind {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n TableKind::UTable => write!(f, \"Table\"),\n TableKind::KTable => write!(f, \"KTable\"),\n TableKind::LTable => write!(f, \"LTable\"),\n }\n 
}\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct TableSchema {\n pub kind: TableKind,\n pub row: StructSchema,\n}\n\nimpl TableSchema {\n pub fn has_key(&self) -> bool {\n match self.kind {\n TableKind::KTable => true,\n TableKind::UTable | TableKind::LTable => false,\n }\n }\n\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self.kind {\n TableKind::KTable => self\n .row\n .fields\n .first()\n .as_ref()\n .map(|field| &field.value_type),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n kind: self.kind,\n row: self.row.without_attrs(),\n }\n }\n}\n\nimpl std::fmt::Display for TableSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}({})\", self.kind, self.row)\n }\n}\n\nimpl TableSchema {\n pub fn new(kind: TableKind, row: StructSchema) -> Self {\n Self { kind, row }\n }\n\n pub fn key_field(&self) -> Option<&FieldSchema> {\n match self.kind {\n TableKind::KTable => Some(self.row.fields.first().unwrap()),\n TableKind::UTable | TableKind::LTable => None,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\n#[serde(tag = \"kind\")]\npub enum ValueType {\n Struct(StructSchema),\n\n #[serde(untagged)]\n Basic(BasicValueType),\n\n #[serde(untagged)]\n Table(TableSchema),\n}\n\nimpl ValueType {\n pub fn key_type(&self) -> Option<&EnrichedValueType> {\n match self {\n ValueType::Basic(_) => None,\n ValueType::Struct(_) => None,\n ValueType::Table(c) => c.key_type(),\n }\n }\n\n // Type equality, ignoring attributes.\n pub fn without_attrs(&self) -> Self {\n match self {\n ValueType::Basic(a) => ValueType::Basic(a.clone()),\n ValueType::Struct(a) => ValueType::Struct(a.without_attrs()),\n ValueType::Table(a) => ValueType::Table(a.without_attrs()),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct EnrichedValueType {\n #[serde(rename = \"type\")]\n pub typ: 
DataType,\n\n #[serde(default, skip_serializing_if = \"std::ops::Not::not\")]\n pub nullable: bool,\n\n #[serde(default, skip_serializing_if = \"BTreeMap::is_empty\")]\n pub attrs: Arc>,\n}\n\nimpl EnrichedValueType {\n pub fn without_attrs(&self) -> Self {\n Self {\n typ: self.typ.without_attrs(),\n nullable: self.nullable,\n attrs: Default::default(),\n }\n }\n}\n\nimpl EnrichedValueType {\n pub fn from_alternative(\n value_type: &EnrichedValueType,\n ) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n typ: (&value_type.typ).try_into()?,\n nullable: value_type.nullable,\n attrs: value_type.attrs.clone(),\n })\n }\n\n pub fn with_attr(mut self, key: &str, value: serde_json::Value) -> Self {\n Arc::make_mut(&mut self.attrs).insert(key.to_string(), value);\n self\n }\n}\n\nimpl std::fmt::Display for EnrichedValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.typ)?;\n if self.nullable {\n write!(f, \"?\")?;\n }\n if !self.attrs.is_empty() {\n write!(\n f,\n \" [{}]\",\n self.attrs\n .iter()\n .map(|(k, v)| format!(\"{k}: {v}\"))\n .collect::>()\n .join(\", \")\n )?;\n }\n Ok(())\n }\n}\n\nimpl std::fmt::Display for ValueType {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n ValueType::Basic(b) => write!(f, \"{b}\"),\n ValueType::Struct(s) => write!(f, \"{s}\"),\n ValueType::Table(c) => write!(f, \"{c}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]\npub struct FieldSchema {\n /// ID is used to identify the field in the schema.\n pub name: FieldName,\n\n #[serde(flatten)]\n pub value_type: EnrichedValueType,\n}\n\nimpl FieldSchema {\n pub fn new(name: impl ToString, value_type: EnrichedValueType) -> Self {\n Self {\n name: name.to_string(),\n value_type,\n }\n }\n\n pub fn without_attrs(&self) -> Self {\n Self {\n name: self.name.clone(),\n value_type: self.value_type.without_attrs(),\n }\n }\n}\n\nimpl FieldSchema {\n 
pub fn from_alternative(field: &FieldSchema) -> Result\n where\n for<'a> &'a AltDataType: TryInto,\n {\n Ok(Self {\n name: field.name.clone(),\n value_type: EnrichedValueType::from_alternative(&field.value_type)?,\n })\n }\n}\n\nimpl std::fmt::Display for FieldSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.name, self.value_type)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct CollectorSchema {\n pub fields: Vec,\n /// If specified, the collector will have an automatically generated UUID field with the given index.\n pub auto_uuid_field_idx: Option,\n}\n\nimpl std::fmt::Display for CollectorSchema {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Collector(\")?;\n for (i, field) in self.fields.iter().enumerate() {\n if i > 0 {\n write!(f, \", \")?;\n }\n write!(f, \"{field}\")?;\n }\n write!(f, \")\")\n }\n}\n\nimpl CollectorSchema {\n pub fn from_fields(fields: Vec, auto_uuid_field: Option) -> Self {\n let mut fields = fields;\n let auto_uuid_field_idx = if let Some(auto_uuid_field) = auto_uuid_field {\n fields.insert(\n 0,\n FieldSchema::new(\n auto_uuid_field,\n EnrichedValueType {\n typ: ValueType::Basic(BasicValueType::Uuid),\n nullable: false,\n attrs: Default::default(),\n },\n ),\n );\n Some(0)\n } else {\n None\n };\n Self {\n fields,\n auto_uuid_field_idx,\n }\n }\n pub fn without_attrs(&self) -> Self {\n Self {\n fields: self.fields.iter().map(|f| f.without_attrs()).collect(),\n auto_uuid_field_idx: self.auto_uuid_field_idx,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct OpScopeSchema {\n /// Output schema for ops with output.\n pub op_output_types: HashMap,\n\n /// Child op scope for foreach ops.\n pub op_scopes: HashMap>,\n\n /// Collectors for the current scope.\n pub collectors: Vec>>,\n}\n\n/// Top-level schema for a flow instance.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub 
struct FlowSchema {\n pub schema: StructSchema,\n\n pub root_op_scope: OpScopeSchema,\n}\n\nimpl std::ops::Deref for FlowSchema {\n type Target = StructSchema;\n\n fn deref(&self) -> &Self::Target {\n &self.schema\n }\n}\n\npub struct OpArgSchema {\n pub name: OpArgName,\n pub value_type: EnrichedValueType,\n pub analyzed_value: AnalyzedValueMapping,\n}\n"], ["/cocoindex/src/base/spec.rs", "use crate::prelude::*;\n\nuse super::schema::{EnrichedValueType, FieldSchema};\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\nuse std::ops::Deref;\n\n/// OutputMode enum for displaying spec info in different granularity\n#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum OutputMode {\n Concise,\n Verbose,\n}\n\n/// Formatting spec per output mode\npub trait SpecFormatter {\n fn format(&self, mode: OutputMode) -> String;\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum SpecString {\n /// The value comes from the environment variable.\n Env(String),\n /// The value is defined by the literal string.\n #[serde(untagged)]\n Literal(String),\n}\n\npub type ScopeName = String;\n\n/// Used to identify a data field within a flow.\n/// Within a flow, in each specific scope, each field name must be unique.\n/// - A field is defined by `outputs` of an operation. 
There must be exactly one definition for each field.\n/// - A field can be used as an input for multiple operations.\npub type FieldName = String;\n\npub const ROOT_SCOPE_NAME: &str = \"_root\";\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]\npub struct FieldPath(pub Vec);\n\nimpl Deref for FieldPath {\n type Target = Vec;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl fmt::Display for FieldPath {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.is_empty() {\n write!(f, \"*\")\n } else {\n write!(f, \"{}\", self.join(\".\"))\n }\n }\n}\n\n/// Used to identify an input or output argument for an operator.\n/// Useful to identify different inputs/outputs of the same operation. Usually omitted for operations with the same purpose of input/output.\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]\npub struct OpArgName(pub Option);\n\nimpl fmt::Display for OpArgName {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if let Some(arg_name) = &self.0 {\n write!(f, \"${arg_name}\")\n } else {\n write!(f, \"?\")\n }\n }\n}\n\nimpl OpArgName {\n pub fn is_unnamed(&self) -> bool {\n self.0.is_none()\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct NamedSpec {\n pub name: String,\n\n #[serde(flatten)]\n pub spec: T,\n}\n\nimpl fmt::Display for NamedSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}: {}\", self.name, self.spec)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FieldMapping {\n /// If unspecified, means the current scope.\n /// \"_root\" refers to the top-level scope.\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub scope: Option,\n\n pub field_path: FieldPath,\n}\n\nimpl fmt::Display for FieldMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let scope = self.scope.as_deref().unwrap_or(\"\");\n write!(\n f,\n 
\"{}{}\",\n if scope.is_empty() {\n \"\".to_string()\n } else {\n format!(\"{scope}.\")\n },\n self.field_path\n )\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ConstantMapping {\n pub schema: EnrichedValueType,\n pub value: serde_json::Value,\n}\n\nimpl fmt::Display for ConstantMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let value = serde_json::to_string(&self.value).unwrap_or(\"#serde_error\".to_string());\n write!(f, \"{value}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CollectionMapping {\n pub field: FieldMapping,\n pub scope_name: ScopeName,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StructMapping {\n pub fields: Vec>,\n}\n\nimpl fmt::Display for StructMapping {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let fields = self\n .fields\n .iter()\n .map(|field| field.name.clone())\n .collect::>()\n .join(\",\");\n write!(f, \"{fields}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum ValueMapping {\n Constant(ConstantMapping),\n Field(FieldMapping),\n Struct(StructMapping),\n // TODO: Add support for collections\n}\n\nimpl ValueMapping {\n pub fn is_entire_scope(&self) -> bool {\n match self {\n ValueMapping::Field(FieldMapping {\n scope: None,\n field_path,\n }) => field_path.is_empty(),\n _ => false,\n }\n }\n}\n\nimpl std::fmt::Display for ValueMapping {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {\n match self {\n ValueMapping::Constant(v) => write!(\n f,\n \"{}\",\n serde_json::to_string(&v.value)\n .unwrap_or_else(|_| \"#(invalid json value)\".to_string())\n ),\n ValueMapping::Field(v) => {\n write!(f, \"{}.{}\", v.scope.as_deref().unwrap_or(\"\"), v.field_path)\n }\n ValueMapping::Struct(v) => write!(\n f,\n \"Struct({})\",\n v.fields\n .iter()\n .map(|f| format!(\"{}={}\", f.name, f.spec))\n .collect::>()\n .join(\", \")\n ),\n }\n }\n}\n\n#[derive(Debug, Clone, 
Serialize, Deserialize)]\npub struct OpArgBinding {\n #[serde(default, skip_serializing_if = \"OpArgName::is_unnamed\")]\n pub arg_name: OpArgName,\n\n #[serde(flatten)]\n pub value: ValueMapping,\n}\n\nimpl fmt::Display for OpArgBinding {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n if self.arg_name.is_unnamed() {\n write!(f, \"{}\", self.value)\n } else {\n write!(f, \"{}={}\", self.arg_name, self.value)\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct OpSpec {\n pub kind: String,\n #[serde(flatten, default)]\n pub spec: serde_json::Map,\n}\n\nimpl SpecFormatter for OpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.kind.clone(),\n OutputMode::Verbose => {\n let spec_str = serde_json::to_string_pretty(&self.spec)\n .map(|s| {\n let lines: Vec<&str> = s.lines().collect();\n if lines.len() < s.lines().count() {\n lines\n .into_iter()\n .chain([\"...\"])\n .collect::>()\n .join(\"\\n \")\n } else {\n lines.join(\"\\n \")\n }\n })\n .unwrap_or(\"#serde_error\".to_string());\n format!(\"{}({})\", self.kind, spec_str)\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct ExecutionOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_rows: Option,\n\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub max_inflight_bytes: Option,\n}\n\nimpl ExecutionOptions {\n pub fn get_concur_control_options(&self) -> concur_control::Options {\n concur_control::Options {\n max_inflight_rows: self.max_inflight_rows,\n max_inflight_bytes: self.max_inflight_bytes,\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct SourceRefreshOptions {\n pub refresh_interval: Option,\n}\n\nimpl fmt::Display for SourceRefreshOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let refresh = self\n .refresh_interval\n .map(|d| format!(\"{d:?}\"))\n 
.unwrap_or(\"none\".to_string());\n write!(f, \"{refresh}\")\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ImportOpSpec {\n pub source: OpSpec,\n\n #[serde(default)]\n pub refresh_options: SourceRefreshOptions,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl SpecFormatter for ImportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let source = self.source.format(mode);\n format!(\"source={}, refresh={}\", source, self.refresh_options)\n }\n}\n\nimpl fmt::Display for ImportOpSpec {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.format(OutputMode::Concise))\n }\n}\n\n/// Transform data using a given operator.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransformOpSpec {\n pub inputs: Vec,\n pub op: OpSpec,\n}\n\nimpl SpecFormatter for TransformOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let inputs = self\n .inputs\n .iter()\n .map(ToString::to_string)\n .collect::>()\n .join(\",\");\n let op_str = self.op.format(mode);\n match mode {\n OutputMode::Concise => format!(\"op={op_str}, inputs={inputs}\"),\n OutputMode::Verbose => format!(\"op={op_str}, inputs=[{inputs}]\"),\n }\n }\n}\n\n/// Apply reactive operations to each row of the input field.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ForEachOpSpec {\n /// Mapping that provides a table to apply reactive operations to.\n pub field_path: FieldPath,\n pub op_scope: ReactiveOpScope,\n\n #[serde(default)]\n pub execution_options: ExecutionOptions,\n}\n\nimpl ForEachOpSpec {\n pub fn get_label(&self) -> String {\n format!(\"Loop over {}\", self.field_path)\n }\n}\n\nimpl SpecFormatter for ForEachOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match mode {\n OutputMode::Concise => self.get_label(),\n OutputMode::Verbose => format!(\"field={}\", self.field_path),\n }\n }\n}\n\n/// Emit data to a given collector at the given scope.\n#[derive(Debug, Clone, 
Serialize, Deserialize)]\npub struct CollectOpSpec {\n /// Field values to be collected.\n pub input: StructMapping,\n /// Scope for the collector.\n pub scope_name: ScopeName,\n /// Name of the collector.\n pub collector_name: FieldName,\n /// If specified, the collector will have an automatically generated UUID field with the given name.\n /// The uuid will remain stable when collected input values remain unchanged.\n pub auto_uuid_field: Option,\n}\n\nimpl SpecFormatter for CollectOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let uuid = self.auto_uuid_field.as_deref().unwrap_or(\"none\");\n match mode {\n OutputMode::Concise => {\n format!(\n \"collector={}, input={}, uuid={}\",\n self.collector_name, self.input, uuid\n )\n }\n OutputMode::Verbose => {\n format!(\n \"scope={}, collector={}, input=[{}], uuid={}\",\n self.scope_name, self.collector_name, self.input, uuid\n )\n }\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]\npub enum VectorSimilarityMetric {\n CosineSimilarity,\n L2Distance,\n InnerProduct,\n}\n\nimpl fmt::Display for VectorSimilarityMetric {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n match self {\n VectorSimilarityMetric::CosineSimilarity => write!(f, \"Cosine\"),\n VectorSimilarityMetric::L2Distance => write!(f, \"L2\"),\n VectorSimilarityMetric::InnerProduct => write!(f, \"InnerProduct\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct VectorIndexDef {\n pub field_name: FieldName,\n pub metric: VectorSimilarityMetric,\n}\n\nimpl fmt::Display for VectorIndexDef {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}:{}\", self.field_name, self.metric)\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]\npub struct IndexOptions {\n #[serde(default, skip_serializing_if = \"Option::is_none\")]\n pub primary_key_fields: Option>,\n #[serde(default, skip_serializing_if = \"Vec::is_empty\")]\n pub 
vector_indexes: Vec,\n}\n\nimpl IndexOptions {\n pub fn primary_key_fields(&self) -> Result<&[FieldName]> {\n Ok(self\n .primary_key_fields\n .as_ref()\n .ok_or(api_error!(\"Primary key fields are not set\"))?\n .as_ref())\n }\n}\n\nimpl fmt::Display for IndexOptions {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n let primary_keys = self\n .primary_key_fields\n .as_ref()\n .map(|p| p.join(\",\"))\n .unwrap_or_default();\n let vector_indexes = self\n .vector_indexes\n .iter()\n .map(|v| v.to_string())\n .collect::>()\n .join(\",\");\n write!(f, \"keys={primary_keys}, indexes={vector_indexes}\")\n }\n}\n\n/// Store data to a given sink.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ExportOpSpec {\n pub collector_name: FieldName,\n pub target: OpSpec,\n pub index_options: IndexOptions,\n pub setup_by_user: bool,\n}\n\nimpl SpecFormatter for ExportOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n let target_str = self.target.format(mode);\n let base = format!(\n \"collector={}, target={}, {}\",\n self.collector_name, target_str, self.index_options\n );\n match mode {\n OutputMode::Concise => base,\n OutputMode::Verbose => format!(\"{}, setup_by_user={}\", base, self.setup_by_user),\n }\n }\n}\n\n/// A reactive operation reacts on given input values.\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"action\")]\npub enum ReactiveOpSpec {\n Transform(TransformOpSpec),\n ForEach(ForEachOpSpec),\n Collect(CollectOpSpec),\n}\n\nimpl SpecFormatter for ReactiveOpSpec {\n fn format(&self, mode: OutputMode) -> String {\n match self {\n ReactiveOpSpec::Transform(t) => format!(\"Transform: {}\", t.format(mode)),\n ReactiveOpSpec::ForEach(fe) => match mode {\n OutputMode::Concise => fe.get_label().to_string(),\n OutputMode::Verbose => format!(\"ForEach: {}\", fe.format(mode)),\n },\n ReactiveOpSpec::Collect(c) => format!(\"Collect: {}\", c.format(mode)),\n }\n }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub 
struct ReactiveOpScope {\n pub name: ScopeName,\n pub ops: Vec>,\n // TODO: Suport collectors\n}\n\nimpl fmt::Display for ReactiveOpScope {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"Scope: name={}\", self.name)\n }\n}\n\n/// A flow defines the rule to sync data from given sources to given sinks with given transformations.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct FlowInstanceSpec {\n /// Name of the flow instance.\n pub name: String,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub import_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub reactive_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub export_ops: Vec>,\n\n #[serde(default = \"Vec::new\", skip_serializing_if = \"Vec::is_empty\")]\n pub declarations: Vec,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct TransientFlowSpec {\n pub name: String,\n pub input_fields: Vec,\n pub reactive_ops: Vec>,\n pub output_value: ValueMapping,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SimpleSemanticsQueryHandlerSpec {\n pub name: String,\n pub flow_instance_name: String,\n pub export_target_name: String,\n pub query_transform_flow: TransientFlowSpec,\n pub default_similarity_metric: VectorSimilarityMetric,\n}\n\npub struct AuthEntryReference {\n pub key: String,\n _phantom: std::marker::PhantomData,\n}\n\nimpl fmt::Debug for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl fmt::Display for AuthEntryReference {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"AuthEntryReference({})\", self.key)\n }\n}\n\nimpl Clone for AuthEntryReference {\n fn clone(&self) -> Self {\n Self {\n key: self.key.clone(),\n _phantom: std::marker::PhantomData,\n }\n }\n}\n\n#[derive(Serialize, 
Deserialize)]\nstruct UntypedAuthEntryReference {\n key: T,\n}\n\nimpl Serialize for AuthEntryReference {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n UntypedAuthEntryReference { key: &self.key }.serialize(serializer)\n }\n}\n\nimpl<'de, T> Deserialize<'de> for AuthEntryReference {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let untyped_ref = UntypedAuthEntryReference::::deserialize(deserializer)?;\n Ok(AuthEntryReference {\n key: untyped_ref.key,\n _phantom: std::marker::PhantomData,\n })\n }\n}\n\nimpl PartialEq for AuthEntryReference {\n fn eq(&self, other: &Self) -> bool {\n self.key == other.key\n }\n}\n\nimpl Eq for AuthEntryReference {}\n\nimpl std::hash::Hash for AuthEntryReference {\n fn hash(&self, state: &mut H) {\n self.key.hash(state);\n }\n}\n"], ["/cocoindex/src/py/convert.rs", "use crate::prelude::*;\n\nuse bytes::Bytes;\nuse numpy::{PyArray1, PyArrayDyn, PyArrayMethods};\nuse pyo3::IntoPyObjectExt;\nuse pyo3::exceptions::PyTypeError;\nuse pyo3::types::PyAny;\nuse pyo3::types::{PyList, PyTuple};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pythonize::{depythonize, pythonize};\nuse serde::de::DeserializeOwned;\nuse std::ops::Deref;\n\nuse super::IntoPyResult;\n\n#[derive(Debug)]\npub struct Pythonized(pub T);\n\nimpl<'py, T: DeserializeOwned> FromPyObject<'py> for Pythonized {\n fn extract_bound(obj: &Bound<'py, PyAny>) -> PyResult {\n Ok(Pythonized(depythonize(obj).into_py_result()?))\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for &Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n pythonize(py, &self.0).into_py_result()\n }\n}\n\nimpl<'py, T: Serialize> IntoPyObject<'py> for Pythonized {\n type Target = PyAny;\n type Output = Bound<'py, PyAny>;\n type Error = PyErr;\n\n fn into_pyobject(self, py: Python<'py>) -> PyResult {\n 
(&self).into_pyobject(py)\n }\n}\n\nimpl Pythonized {\n pub fn into_inner(self) -> T {\n self.0\n }\n}\n\nimpl Deref for Pythonized {\n type Target = T;\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nfn basic_value_to_py_object<'py>(\n py: Python<'py>,\n v: &value::BasicValue,\n) -> PyResult> {\n let result = match v {\n value::BasicValue::Bytes(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Str(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Bool(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Int64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float32(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Float64(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Range(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Uuid(uuid_val) => uuid_val.into_bound_py_any(py)?,\n value::BasicValue::Date(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Time(v) => v.into_bound_py_any(py)?,\n value::BasicValue::LocalDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::OffsetDateTime(v) => v.into_bound_py_any(py)?,\n value::BasicValue::TimeDelta(v) => v.into_bound_py_any(py)?,\n value::BasicValue::Json(v) => pythonize(py, v).into_py_result()?,\n value::BasicValue::Vector(v) => handle_vector_to_py(py, v)?,\n value::BasicValue::UnionVariant { tag_id, value } => {\n (*tag_id, basic_value_to_py_object(py, value)?).into_bound_py_any(py)?\n }\n };\n Ok(result)\n}\n\npub fn field_values_to_py_object<'py, 'a>(\n py: Python<'py>,\n values: impl Iterator,\n) -> PyResult> {\n let fields = values\n .map(|v| value_to_py_object(py, v))\n .collect::>>()?;\n Ok(PyTuple::new(py, fields)?.into_any())\n}\n\npub fn value_to_py_object<'py>(py: Python<'py>, v: &value::Value) -> PyResult> {\n let result = match v {\n value::Value::Null => py.None().into_bound(py),\n value::Value::Basic(v) => basic_value_to_py_object(py, v)?,\n value::Value::Struct(v) => field_values_to_py_object(py, v.fields.iter())?,\n value::Value::UTable(v) | 
value::Value::LTable(v) => {\n let rows = v\n .iter()\n .map(|v| field_values_to_py_object(py, v.0.fields.iter()))\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n value::Value::KTable(v) => {\n let rows = v\n .iter()\n .map(|(k, v)| {\n field_values_to_py_object(\n py,\n std::iter::once(&value::Value::from(k.clone())).chain(v.0.fields.iter()),\n )\n })\n .collect::>>()?;\n PyList::new(py, rows)?.into_any()\n }\n };\n Ok(result)\n}\n\nfn basic_value_from_py_object<'py>(\n typ: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = match typ {\n schema::BasicValueType::Bytes => {\n value::BasicValue::Bytes(Bytes::from(v.extract::>()?))\n }\n schema::BasicValueType::Str => value::BasicValue::Str(Arc::from(v.extract::()?)),\n schema::BasicValueType::Bool => value::BasicValue::Bool(v.extract::()?),\n schema::BasicValueType::Int64 => value::BasicValue::Int64(v.extract::()?),\n schema::BasicValueType::Float32 => value::BasicValue::Float32(v.extract::()?),\n schema::BasicValueType::Float64 => value::BasicValue::Float64(v.extract::()?),\n schema::BasicValueType::Range => value::BasicValue::Range(depythonize(v)?),\n schema::BasicValueType::Uuid => value::BasicValue::Uuid(v.extract::()?),\n schema::BasicValueType::Date => value::BasicValue::Date(v.extract::()?),\n schema::BasicValueType::Time => value::BasicValue::Time(v.extract::()?),\n schema::BasicValueType::LocalDateTime => {\n value::BasicValue::LocalDateTime(v.extract::()?)\n }\n schema::BasicValueType::OffsetDateTime => {\n if v.getattr_opt(\"tzinfo\")?\n .ok_or_else(|| {\n PyErr::new::(format!(\n \"expecting a datetime.datetime value, got {}\",\n v.get_type()\n ))\n })?\n .is_none()\n {\n value::BasicValue::OffsetDateTime(\n v.extract::()?.and_utc().into(),\n )\n } else {\n value::BasicValue::OffsetDateTime(\n v.extract::>()?,\n )\n }\n }\n schema::BasicValueType::TimeDelta => {\n value::BasicValue::TimeDelta(v.extract::()?)\n }\n schema::BasicValueType::Json => {\n 
value::BasicValue::Json(Arc::from(depythonize::(v)?))\n }\n schema::BasicValueType::Vector(elem) => {\n if let Some(vector) = handle_ndarray_from_py(&elem.element_type, v)? {\n vector\n } else {\n // Fallback to list\n value::BasicValue::Vector(Arc::from(\n v.extract::>>()?\n .into_iter()\n .map(|v| basic_value_from_py_object(&elem.element_type, &v))\n .collect::>>()?,\n ))\n }\n }\n schema::BasicValueType::Union(s) => {\n let mut valid_value = None;\n\n // Try parsing the value\n for (i, typ) in s.types.iter().enumerate() {\n if let Ok(value) = basic_value_from_py_object(typ, v) {\n valid_value = Some(value::BasicValue::UnionVariant {\n tag_id: i,\n value: Box::new(value),\n });\n break;\n }\n }\n\n valid_value.ok_or_else(|| {\n PyErr::new::(format!(\n \"invalid union value: {}, available types: {:?}\",\n v, s.types\n ))\n })?\n }\n };\n Ok(result)\n}\n\n// Helper function to convert PyAny to BasicValue for NDArray\nfn handle_ndarray_from_py<'py>(\n elem_type: &schema::BasicValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult> {\n macro_rules! 
try_convert {\n ($t:ty, $cast:expr) => {\n if let Ok(array) = v.downcast::>() {\n let data = array.readonly().as_slice()?.to_vec();\n let vec = data.into_iter().map($cast).collect::>();\n return Ok(Some(value::BasicValue::Vector(Arc::from(vec))));\n }\n };\n }\n\n match *elem_type {\n schema::BasicValueType::Float32 => try_convert!(f32, value::BasicValue::Float32),\n schema::BasicValueType::Float64 => try_convert!(f64, value::BasicValue::Float64),\n schema::BasicValueType::Int64 => try_convert!(i64, value::BasicValue::Int64),\n _ => {}\n }\n\n Ok(None)\n}\n\n// Helper function to convert BasicValue::Vector to PyAny\nfn handle_vector_to_py<'py>(\n py: Python<'py>,\n v: &[value::BasicValue],\n) -> PyResult> {\n match v.first() {\n Some(value::BasicValue::Float32(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float32(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float32\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Float64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Float64(f) => Ok(*f),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Float64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n Some(value::BasicValue::Int64(_)) => {\n let data = v\n .iter()\n .map(|x| match x {\n value::BasicValue::Int64(i) => Ok(*i),\n _ => Err(PyErr::new::(\n \"Expected all elements to be Int64\",\n )),\n })\n .collect::>>()?;\n\n Ok(PyArray1::from_vec(py, data).into_any())\n }\n _ => Ok(v\n .iter()\n .map(|v| basic_value_to_py_object(py, v))\n .collect::>>()?\n .into_bound_py_any(py)?),\n }\n}\n\nfn field_values_from_py_object<'py>(\n schema: &schema::StructSchema,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let list = v.extract::>>()?;\n if list.len() != schema.fields.len() {\n return Err(PyException::new_err(format!(\n \"struct field number mismatch, expected {}, got {}\",\n 
schema.fields.len(),\n list.len()\n )));\n }\n\n Ok(value::FieldValues {\n fields: schema\n .fields\n .iter()\n .zip(list.into_iter())\n .map(|(f, v)| value_from_py_object(&f.value_type.typ, &v))\n .collect::>>()?,\n })\n}\n\npub fn value_from_py_object<'py>(\n typ: &schema::ValueType,\n v: &Bound<'py, PyAny>,\n) -> PyResult {\n let result = if v.is_none() {\n value::Value::Null\n } else {\n match typ {\n schema::ValueType::Basic(typ) => {\n value::Value::Basic(basic_value_from_py_object(typ, v)?)\n }\n schema::ValueType::Struct(schema) => {\n value::Value::Struct(field_values_from_py_object(schema, v)?)\n }\n schema::ValueType::Table(schema) => {\n let list = v.extract::>>()?;\n let values = list\n .into_iter()\n .map(|v| field_values_from_py_object(&schema.row, &v))\n .collect::>>()?;\n\n match schema.kind {\n schema::TableKind::UTable => {\n value::Value::UTable(values.into_iter().map(|v| v.into()).collect())\n }\n schema::TableKind::LTable => {\n value::Value::LTable(values.into_iter().map(|v| v.into()).collect())\n }\n\n schema::TableKind::KTable => value::Value::KTable(\n values\n .into_iter()\n .map(|v| {\n let mut iter = v.fields.into_iter();\n let key = iter.next().unwrap().into_key().into_py_result()?;\n Ok((\n key,\n value::ScopeValue(value::FieldValues {\n fields: iter.collect::>(),\n }),\n ))\n })\n .collect::>>()?,\n ),\n }\n }\n }\n };\n Ok(result)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::base::schema;\n use crate::base::value;\n use crate::base::value::ScopeValue;\n use pyo3::Python;\n use std::collections::BTreeMap;\n use std::sync::Arc;\n\n fn assert_roundtrip_conversion(original_value: &value::Value, value_type: &schema::ValueType) {\n Python::with_gil(|py| {\n // Convert Rust value to Python object using value_to_py_object\n let py_object = value_to_py_object(py, original_value)\n .expect(\"Failed to convert Rust value to Python object\");\n\n println!(\"Python object: {py_object:?}\");\n let roundtripped_value = 
value_from_py_object(value_type, &py_object)\n .expect(\"Failed to convert Python object back to Rust value\");\n\n println!(\"Roundtripped value: {roundtripped_value:?}\");\n assert_eq!(\n original_value, &roundtripped_value,\n \"Value mismatch after roundtrip\"\n );\n });\n }\n\n #[test]\n fn test_roundtrip_basic_values() {\n let values_and_types = vec![\n (\n value::Value::Basic(value::BasicValue::Int64(42)),\n schema::ValueType::Basic(schema::BasicValueType::Int64),\n ),\n (\n value::Value::Basic(value::BasicValue::Float64(3.14)),\n schema::ValueType::Basic(schema::BasicValueType::Float64),\n ),\n (\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"hello\"))),\n schema::ValueType::Basic(schema::BasicValueType::Str),\n ),\n (\n value::Value::Basic(value::BasicValue::Bool(true)),\n schema::ValueType::Basic(schema::BasicValueType::Bool),\n ),\n ];\n\n for (val, typ) in values_and_types {\n assert_roundtrip_conversion(&val, &typ);\n }\n }\n\n #[test]\n fn test_roundtrip_struct() {\n let struct_schema = schema::StructSchema {\n description: Some(Arc::from(\"Test struct description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"a\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"b\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n };\n\n let struct_val_data = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(10)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"world\"))),\n ],\n };\n\n let struct_val = value::Value::Struct(struct_val_data);\n let struct_typ = schema::ValueType::Struct(struct_schema); // No clone needed\n\n assert_roundtrip_conversion(&struct_val, &struct_typ);\n }\n\n #[test]\n fn 
test_roundtrip_table_types() {\n let row_schema_struct = Arc::new(schema::StructSchema {\n description: Some(Arc::from(\"Test table row description\")),\n fields: Arc::new(vec![\n schema::FieldSchema {\n name: \"key_col\".to_string(), // Will be used as key for KTable implicitly\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Int64),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_1\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Str),\n nullable: false,\n attrs: Default::default(),\n },\n },\n schema::FieldSchema {\n name: \"data_col_2\".to_string(),\n value_type: schema::EnrichedValueType {\n typ: schema::ValueType::Basic(schema::BasicValueType::Bool),\n nullable: false,\n attrs: Default::default(),\n },\n },\n ]),\n });\n\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(1)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Int64(2)),\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // UTable\n let utable_schema = schema::TableSchema {\n kind: schema::TableKind::UTable,\n row: (*row_schema_struct).clone(),\n };\n let utable_val = value::Value::UTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let utable_typ = schema::ValueType::Table(utable_schema);\n assert_roundtrip_conversion(&utable_val, &utable_typ);\n\n // LTable\n let ltable_schema = schema::TableSchema {\n kind: schema::TableKind::LTable,\n row: 
(*row_schema_struct).clone(),\n };\n let ltable_val = value::Value::LTable(vec![row1_scope_val.clone(), row2_scope_val.clone()]);\n let ltable_typ = schema::ValueType::Table(ltable_schema);\n assert_roundtrip_conversion(<able_val, <able_typ);\n\n // KTable\n let ktable_schema = schema::TableSchema {\n kind: schema::TableKind::KTable,\n row: (*row_schema_struct).clone(),\n };\n let mut ktable_data = BTreeMap::new();\n\n // Create KTable entries where the ScopeValue doesn't include the key field\n // This matches how the Python code will serialize/deserialize\n let row1_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row1_data\"))),\n value::Value::Basic(value::BasicValue::Bool(true)),\n ],\n };\n let row1_scope_val: value::ScopeValue = row1_fields.into();\n\n let row2_fields = value::FieldValues {\n fields: vec![\n value::Value::Basic(value::BasicValue::Str(Arc::from(\"row2_data\"))),\n value::Value::Basic(value::BasicValue::Bool(false)),\n ],\n };\n let row2_scope_val: value::ScopeValue = row2_fields.into();\n\n // For KTable, the key is extracted from the first field of ScopeValue based on current serialization\n let key1 = value::Value::::Basic(value::BasicValue::Int64(1))\n .into_key()\n .unwrap();\n let key2 = value::Value::::Basic(value::BasicValue::Int64(2))\n .into_key()\n .unwrap();\n\n ktable_data.insert(key1, row1_scope_val.clone());\n ktable_data.insert(key2, row2_scope_val.clone());\n\n let ktable_val = value::Value::KTable(ktable_data);\n let ktable_typ = schema::ValueType::Table(ktable_schema);\n assert_roundtrip_conversion(&ktable_val, &ktable_typ);\n }\n}\n"], ["/cocoindex/src/builder/flow_builder.rs", "use crate::{prelude::*, py::Pythonized};\n\nuse pyo3::{exceptions::PyException, prelude::*};\nuse pyo3_async_runtimes::tokio::future_into_py;\nuse std::{collections::btree_map, ops::Deref};\nuse tokio::task::LocalSet;\n\nuse super::analyzer::{\n AnalyzerContext, CollectorBuilder, 
DataScopeBuilder, OpScope, build_flow_instance_context,\n};\nuse crate::{\n base::{\n schema::{CollectorSchema, FieldSchema},\n spec::{FieldName, NamedSpec},\n },\n lib_context::LibContext,\n ops::interface::FlowInstanceContext,\n py::IntoPyResult,\n};\nuse crate::{lib_context::FlowContext, py};\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct OpScopeRef(Arc);\n\nimpl From> for OpScopeRef {\n fn from(scope: Arc) -> Self {\n Self(scope)\n }\n}\n\nimpl Deref for OpScopeRef {\n type Target = Arc;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\nimpl std::fmt::Display for OpScopeRef {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.0)\n }\n}\n\n#[pymethods]\nimpl OpScopeRef {\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn add_collector(&mut self, name: String) -> PyResult {\n let collector = DataCollector {\n name,\n scope: self.0.clone(),\n collector: Mutex::new(None),\n };\n Ok(collector)\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataType {\n schema: schema::EnrichedValueType,\n}\n\nimpl From for DataType {\n fn from(schema: schema::EnrichedValueType) -> Self {\n Self { schema }\n }\n}\n\n#[pymethods]\nimpl DataType {\n pub fn __str__(&self) -> String {\n format!(\"{}\", self.schema)\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn schema(&self) -> Pythonized {\n Pythonized(self.schema.clone())\n }\n}\n\n#[pyclass]\n#[derive(Debug, Clone)]\npub struct DataSlice {\n scope: Arc,\n value: Arc,\n data_type: DataType,\n}\n\n#[pymethods]\nimpl DataSlice {\n pub fn data_type(&self) -> DataType {\n self.data_type.clone()\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n\n pub fn field(&self, field_name: &str) -> PyResult> {\n let field_schema = match &self.data_type.schema.typ {\n 
schema::ValueType::Struct(struct_type) => {\n match struct_type.fields.iter().find(|f| f.name == field_name) {\n Some(field) => field,\n None => return Ok(None),\n }\n }\n _ => return Err(PyException::new_err(\"expect struct type\")),\n };\n let value_mapping = match self.value.as_ref() {\n spec::ValueMapping::Field(spec::FieldMapping {\n scope,\n field_path: spec::FieldPath(field_path),\n }) => spec::ValueMapping::Field(spec::FieldMapping {\n scope: scope.clone(),\n field_path: spec::FieldPath(\n field_path\n .iter()\n .cloned()\n .chain([field_name.to_string()])\n .collect(),\n ),\n }),\n\n spec::ValueMapping::Struct(v) => v\n .fields\n .iter()\n .find(|f| f.name == field_name)\n .map(|f| f.spec.clone())\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?,\n\n spec::ValueMapping::Constant { .. } => {\n return Err(PyException::new_err(\n \"field access not supported for literal\",\n ));\n }\n };\n Ok(Some(DataSlice {\n scope: self.scope.clone(),\n value: Arc::new(value_mapping),\n data_type: field_schema.value_type.clone().into(),\n }))\n }\n}\n\nimpl DataSlice {\n fn extract_value_mapping(&self) -> spec::ValueMapping {\n match self.value.as_ref() {\n spec::ValueMapping::Field(v) => spec::ValueMapping::Field(spec::FieldMapping {\n field_path: v.field_path.clone(),\n scope: v.scope.clone().or_else(|| Some(self.scope.name.clone())),\n }),\n v => v.clone(),\n }\n }\n}\n\nimpl std::fmt::Display for DataSlice {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(\n f,\n \"DataSlice({}; {} {}) \",\n self.data_type.schema, self.scope, self.value\n )?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct DataCollector {\n name: String,\n scope: Arc,\n collector: Mutex>,\n}\n\n#[pymethods]\nimpl DataCollector {\n fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for DataCollector {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result {\n let collector = self.collector.lock().unwrap();\n write!(f, \"DataCollector \\\"{}\\\" ({}\", self.name, self.scope)?;\n if let Some(collector) = collector.as_ref() {\n write!(f, \": {}\", collector.schema)?;\n if collector.is_used {\n write!(f, \" (used)\")?;\n }\n }\n write!(f, \")\")?;\n Ok(())\n }\n}\n\n#[pyclass]\npub struct FlowBuilder {\n lib_context: Arc,\n flow_inst_context: Arc,\n\n root_op_scope: Arc,\n flow_instance_name: String,\n reactive_ops: Vec>,\n\n direct_input_fields: Vec,\n direct_output_value: Option,\n\n import_ops: Vec>,\n export_ops: Vec>,\n\n declarations: Vec,\n\n next_generated_op_id: usize,\n}\n\n#[pymethods]\nimpl FlowBuilder {\n #[new]\n pub fn new(name: &str) -> PyResult {\n let lib_context = get_lib_context().into_py_result()?;\n let root_op_scope = OpScope::new(\n spec::ROOT_SCOPE_NAME.to_string(),\n None,\n Arc::new(Mutex::new(DataScopeBuilder::new())),\n );\n let flow_inst_context = build_flow_instance_context(name, None);\n let result = Self {\n lib_context,\n flow_inst_context,\n root_op_scope,\n flow_instance_name: name.to_string(),\n\n reactive_ops: vec![],\n\n import_ops: vec![],\n export_ops: vec![],\n\n direct_input_fields: vec![],\n direct_output_value: None,\n\n declarations: vec![],\n\n next_generated_op_id: 0,\n };\n Ok(result)\n }\n\n pub fn root_scope(&self) -> OpScopeRef {\n OpScopeRef(self.root_op_scope.clone())\n }\n\n #[pyo3(signature = (kind, op_spec, target_scope, name, refresh_options=None, execution_options=None))]\n #[allow(clippy::too_many_arguments)]\n pub fn add_source(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n target_scope: Option,\n name: String,\n refresh_options: Option>,\n execution_options: Option>,\n ) -> PyResult {\n if let Some(target_scope) = target_scope {\n if *target_scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"source can only be added to the root scope\",\n ));\n }\n }\n let import_op = spec::NamedSpec {\n 
name,\n spec: spec::ImportOpSpec {\n source: spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n },\n refresh_options: refresh_options.map(|o| o.into_inner()).unwrap_or_default(),\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n },\n };\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(\n analyzer_ctx.analyze_import_op(&self.root_op_scope, import_op.clone()),\n )\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.import_ops.push(import_op);\n Ok(result)\n }\n\n pub fn constant(\n &self,\n value_type: py::Pythonized,\n value: Bound<'_, PyAny>,\n ) -> PyResult {\n let schema = value_type.into_inner();\n let value = py::value_from_py_object(&schema.typ, &value)?;\n let slice = DataSlice {\n scope: self.root_op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Constant(spec::ConstantMapping {\n schema: schema.clone(),\n value: serde_json::to_value(value).into_py_result()?,\n })),\n data_type: schema.into(),\n };\n Ok(slice)\n }\n\n pub fn add_direct_input(\n &mut self,\n name: String,\n value_type: py::Pythonized,\n ) -> PyResult {\n let value_type = value_type.into_inner();\n {\n let mut root_data_scope = self.root_op_scope.data.lock().unwrap();\n root_data_scope\n .add_field(name.clone(), &value_type)\n .into_py_result()?;\n }\n let result = Self::last_field_to_data_slice(&self.root_op_scope).into_py_result()?;\n self.direct_input_fields\n .push(FieldSchema { name, value_type });\n Ok(result)\n }\n\n pub fn set_direct_output(&mut self, data_slice: DataSlice) -> PyResult<()> {\n if data_slice.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"direct output must be value in the root scope\",\n ));\n }\n self.direct_output_value = 
Some(data_slice.extract_value_mapping());\n Ok(())\n }\n\n #[pyo3(signature = (data_slice, execution_options=None))]\n pub fn for_each(\n &mut self,\n data_slice: DataSlice,\n execution_options: Option>,\n ) -> PyResult {\n let parent_scope = &data_slice.scope;\n let field_path = match data_slice.value.as_ref() {\n spec::ValueMapping::Field(v) => &v.field_path,\n _ => return Err(PyException::new_err(\"expect field path\")),\n };\n let num_parent_layers = parent_scope.ancestors().count();\n let scope_name = format!(\n \"{}_{}\",\n field_path.last().map_or(\"\", |s| s.as_str()),\n num_parent_layers\n );\n let (_, child_op_scope) = parent_scope\n .new_foreach_op_scope(scope_name.clone(), field_path)\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name: format!(\".for_each.{}\", self.next_generated_op_id),\n spec: spec::ReactiveOpSpec::ForEach(spec::ForEachOpSpec {\n field_path: field_path.clone(),\n op_scope: spec::ReactiveOpScope {\n name: scope_name,\n ops: vec![],\n },\n execution_options: execution_options\n .map(|o| o.into_inner())\n .unwrap_or_default(),\n }),\n };\n self.next_generated_op_id += 1;\n self.get_mut_reactive_ops(parent_scope)\n .into_py_result()?\n .push(reactive_op);\n\n Ok(OpScopeRef(child_op_scope))\n }\n\n #[pyo3(signature = (kind, op_spec, args, target_scope, name))]\n pub fn transform(\n &mut self,\n py: Python<'_>,\n kind: String,\n op_spec: py::Pythonized>,\n args: Vec<(DataSlice, Option)>,\n target_scope: Option,\n name: String,\n ) -> PyResult {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n let op_scope = Self::minimum_common_scope(\n args.iter().map(|(ds, _)| &ds.scope),\n target_scope.as_ref().map(|s| &s.0),\n )\n .into_py_result()?;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Transform(spec::TransformOpSpec {\n inputs: args\n .iter()\n .map(|(ds, arg_name)| spec::OpArgBinding {\n arg_name: spec::OpArgName(arg_name.clone()),\n value: 
ds.extract_value_mapping(),\n })\n .collect(),\n op: spec,\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(op_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(op_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let result = Self::last_field_to_data_slice(op_scope).into_py_result()?;\n Ok(result)\n }\n\n #[pyo3(signature = (collector, fields, auto_uuid_field=None))]\n pub fn collect(\n &mut self,\n py: Python<'_>,\n collector: &DataCollector,\n fields: Vec<(FieldName, DataSlice)>,\n auto_uuid_field: Option,\n ) -> PyResult<()> {\n let common_scope = Self::minimum_common_scope(fields.iter().map(|(_, ds)| &ds.scope), None)\n .into_py_result()?;\n let name = format!(\".collect.{}\", self.next_generated_op_id);\n self.next_generated_op_id += 1;\n\n let reactive_op = spec::NamedSpec {\n name,\n spec: spec::ReactiveOpSpec::Collect(spec::CollectOpSpec {\n input: spec::StructMapping {\n fields: fields\n .iter()\n .map(|(name, ds)| NamedSpec {\n name: name.clone(),\n spec: ds.extract_value_mapping(),\n })\n .collect(),\n },\n scope_name: collector.scope.name.clone(),\n collector_name: collector.name.clone(),\n auto_uuid_field: auto_uuid_field.clone(),\n }),\n };\n\n let analyzer_ctx = AnalyzerContext {\n lib_ctx: self.lib_context.clone(),\n flow_ctx: self.flow_inst_context.clone(),\n };\n let analyzed = py\n .allow_threads(|| {\n get_runtime().block_on(analyzer_ctx.analyze_reactive_op(common_scope, &reactive_op))\n })\n .into_py_result()?;\n std::mem::drop(analyzed);\n\n self.get_mut_reactive_ops(common_scope)\n .into_py_result()?\n .push(reactive_op);\n\n let collector_schema = CollectorSchema::from_fields(\n fields\n .into_iter()\n .map(|(name, ds)| FieldSchema {\n name,\n value_type: ds.data_type.schema,\n })\n 
.collect(),\n auto_uuid_field,\n );\n {\n let mut collector = collector.collector.lock().unwrap();\n if let Some(collector) = collector.as_mut() {\n collector.merge_schema(&collector_schema).into_py_result()?;\n } else {\n *collector = Some(CollectorBuilder::new(Arc::new(collector_schema)));\n }\n }\n\n Ok(())\n }\n\n #[pyo3(signature = (name, kind, op_spec, index_options, input, setup_by_user=false))]\n pub fn export(\n &mut self,\n name: String,\n kind: String,\n op_spec: py::Pythonized>,\n index_options: py::Pythonized,\n input: &DataCollector,\n setup_by_user: bool,\n ) -> PyResult<()> {\n let spec = spec::OpSpec {\n kind,\n spec: op_spec.into_inner(),\n };\n\n if input.scope != self.root_op_scope {\n return Err(PyException::new_err(\n \"Export can only work on collectors belonging to the root scope.\",\n ));\n }\n self.export_ops.push(spec::NamedSpec {\n name,\n spec: spec::ExportOpSpec {\n collector_name: input.name.clone(),\n target: spec,\n index_options: index_options.into_inner(),\n setup_by_user,\n },\n });\n Ok(())\n }\n\n pub fn declare(&mut self, op_spec: py::Pythonized) -> PyResult<()> {\n self.declarations.push(op_spec.into_inner());\n Ok(())\n }\n\n pub fn scope_field(&self, scope: OpScopeRef, field_name: &str) -> PyResult> {\n let field_type = {\n let scope_builder = scope.0.data.lock().unwrap();\n let (_, field_schema) = scope_builder\n .data\n .find_field(field_name)\n .ok_or_else(|| PyException::new_err(format!(\"field {field_name} not found\")))?;\n schema::EnrichedValueType::from_alternative(&field_schema.value_type)\n .into_py_result()?\n };\n Ok(Some(DataSlice {\n scope: scope.0,\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![field_name.to_string()]),\n })),\n data_type: DataType { schema: field_type },\n }))\n }\n\n pub fn build_flow(&self, py: Python<'_>, py_event_loop: Py) -> PyResult {\n let spec = spec::FlowInstanceSpec {\n name: self.flow_instance_name.clone(),\n 
import_ops: self.import_ops.clone(),\n reactive_ops: self.reactive_ops.clone(),\n export_ops: self.export_ops.clone(),\n declarations: self.declarations.clone(),\n };\n let flow_instance_ctx = build_flow_instance_context(\n &self.flow_instance_name,\n Some(crate::py::PythonExecutionContext::new(py, py_event_loop)),\n );\n let flow_ctx = py\n .allow_threads(|| {\n get_runtime().block_on(async move {\n let analyzed_flow =\n super::AnalyzedFlow::from_flow_instance(spec, flow_instance_ctx).await?;\n let persistence_ctx = self.lib_context.require_persistence_ctx()?;\n let execution_ctx = {\n let flow_setup_ctx = persistence_ctx.setup_ctx.read().await;\n FlowContext::new(\n Arc::new(analyzed_flow),\n flow_setup_ctx\n .all_setup_states\n .flows\n .get(&self.flow_instance_name),\n )\n .await?\n };\n anyhow::Ok(execution_ctx)\n })\n })\n .into_py_result()?;\n let mut flow_ctxs = self.lib_context.flows.lock().unwrap();\n let flow_ctx = match flow_ctxs.entry(self.flow_instance_name.clone()) {\n btree_map::Entry::Occupied(_) => {\n return Err(PyException::new_err(format!(\n \"flow instance name already exists: {}\",\n self.flow_instance_name\n )));\n }\n btree_map::Entry::Vacant(entry) => {\n let flow_ctx = Arc::new(flow_ctx);\n entry.insert(flow_ctx.clone());\n flow_ctx\n }\n };\n Ok(py::Flow(flow_ctx))\n }\n\n pub fn build_transient_flow_async<'py>(\n &self,\n py: Python<'py>,\n py_event_loop: Py,\n ) -> PyResult> {\n if self.direct_input_fields.is_empty() {\n return Err(PyException::new_err(\"expect at least one direct input\"));\n }\n let direct_output_value = if let Some(direct_output_value) = &self.direct_output_value {\n direct_output_value\n } else {\n return Err(PyException::new_err(\"expect direct output\"));\n };\n let spec = spec::TransientFlowSpec {\n name: self.flow_instance_name.clone(),\n input_fields: self.direct_input_fields.clone(),\n reactive_ops: self.reactive_ops.clone(),\n output_value: direct_output_value.clone(),\n };\n let py_ctx = 
crate::py::PythonExecutionContext::new(py, py_event_loop);\n\n let analyzed_flow = get_runtime().spawn_blocking(|| {\n let local_set = LocalSet::new();\n local_set.block_on(\n get_runtime(),\n super::AnalyzedTransientFlow::from_transient_flow(spec, Some(py_ctx)),\n )\n });\n future_into_py(py, async move {\n Ok(py::TransientFlow(Arc::new(\n analyzed_flow.await.into_py_result()?.into_py_result()?,\n )))\n })\n }\n\n pub fn __str__(&self) -> String {\n format!(\"{self}\")\n }\n\n pub fn __repr__(&self) -> String {\n self.__str__()\n }\n}\n\nimpl std::fmt::Display for FlowBuilder {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"Flow instance name: {}\\n\\n\", self.flow_instance_name)?;\n for op in self.import_ops.iter() {\n write!(\n f,\n \"Source op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for field in self.direct_input_fields.iter() {\n writeln!(f, \"Direct input {}: {}\", field.name, field.value_type)?;\n }\n if !self.direct_input_fields.is_empty() {\n writeln!(f)?;\n }\n for op in self.reactive_ops.iter() {\n write!(\n f,\n \"Reactive op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n for op in self.export_ops.iter() {\n write!(\n f,\n \"Export op {}\\n{}\\n\",\n op.name,\n serde_json::to_string_pretty(&op.spec).unwrap_or_default()\n )?;\n }\n if let Some(output) = &self.direct_output_value {\n write!(f, \"Direct output: {output}\\n\\n\")?;\n }\n Ok(())\n }\n}\n\nimpl FlowBuilder {\n fn last_field_to_data_slice(op_scope: &Arc) -> Result {\n let data_scope = op_scope.data.lock().unwrap();\n let last_field = data_scope.last_field().unwrap();\n let result = DataSlice {\n scope: op_scope.clone(),\n value: Arc::new(spec::ValueMapping::Field(spec::FieldMapping {\n scope: None,\n field_path: spec::FieldPath(vec![last_field.name.clone()]),\n })),\n data_type: 
schema::EnrichedValueType::from_alternative(&last_field.value_type)?.into(),\n };\n Ok(result)\n }\n\n fn minimum_common_scope<'a>(\n scopes: impl Iterator>,\n target_scope: Option<&'a Arc>,\n ) -> Result<&'a Arc> {\n let mut scope_iter = scopes;\n let mut common_scope = scope_iter\n .next()\n .ok_or_else(|| PyException::new_err(\"expect at least one input\"))?;\n for scope in scope_iter {\n if scope.is_op_scope_descendant(common_scope) {\n common_scope = scope;\n } else if !common_scope.is_op_scope_descendant(scope) {\n api_bail!(\n \"expect all arguments share the common scope, got {} and {} exclusive to each other\",\n common_scope,\n scope\n );\n }\n }\n if let Some(target_scope) = target_scope {\n if !target_scope.is_op_scope_descendant(common_scope) {\n api_bail!(\n \"the field can only be attached to a scope or sub-scope of the input value. Target scope: {}, input scope: {}\",\n target_scope,\n common_scope\n );\n }\n common_scope = target_scope;\n }\n Ok(common_scope)\n }\n\n fn get_mut_reactive_ops<'a>(\n &'a mut self,\n op_scope: &OpScope,\n ) -> Result<&'a mut Vec>> {\n Self::get_mut_reactive_ops_internal(op_scope, &mut self.reactive_ops)\n }\n\n fn get_mut_reactive_ops_internal<'a>(\n op_scope: &OpScope,\n root_reactive_ops: &'a mut Vec>,\n ) -> Result<&'a mut Vec>> {\n let result = match &op_scope.parent {\n None => root_reactive_ops,\n Some((parent_op_scope, field_path)) => {\n let parent_reactive_ops =\n Self::get_mut_reactive_ops_internal(parent_op_scope, root_reactive_ops)?;\n // Reuse the last foreach if matched, otherwise create a new one.\n match parent_reactive_ops.last() {\n Some(spec::NamedSpec {\n spec: spec::ReactiveOpSpec::ForEach(foreach_spec),\n ..\n }) if &foreach_spec.field_path == field_path\n && foreach_spec.op_scope.name == op_scope.name => {}\n\n _ => {\n api_bail!(\"already out of op scope `{}`\", op_scope.name);\n }\n }\n match &mut parent_reactive_ops.last_mut().unwrap().spec {\n spec::ReactiveOpSpec::ForEach(foreach_spec) => 
&mut foreach_spec.op_scope.ops,\n _ => unreachable!(),\n }\n }\n };\n Ok(result)\n }\n}\n"], ["/cocoindex/src/execution/dumper.rs", "use crate::prelude::*;\n\nuse futures::{StreamExt, future::try_join_all};\nuse itertools::Itertools;\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::path::{Path, PathBuf};\nuse yaml_rust2::YamlEmitter;\n\nuse super::evaluator::SourceRowEvaluationContext;\nuse super::memoization::EvaluationMemoryOptions;\nuse super::row_indexer;\nuse crate::base::{schema, value};\nuse crate::builder::plan::{AnalyzedImportOp, ExecutionPlan};\nuse crate::ops::interface::SourceExecutorListOptions;\nuse crate::utils::yaml_ser::YamlSerializer;\n\n#[derive(Debug, Clone, Deserialize)]\npub struct EvaluateAndDumpOptions {\n pub output_dir: String,\n pub use_cache: bool,\n}\n\nconst FILENAME_PREFIX_MAX_LENGTH: usize = 128;\n\nstruct TargetExportData<'a> {\n schema: &'a Vec,\n // The purpose is to make rows sorted by primary key.\n data: BTreeMap,\n}\n\nimpl Serialize for TargetExportData<'_> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(Some(self.data.len()))?;\n for (_, values) in self.data.iter() {\n seq.serialize_element(&value::TypedFieldsValue {\n schema: self.schema,\n values_iter: values.fields.iter(),\n })?;\n }\n seq.end()\n }\n}\n\n#[derive(Serialize)]\nstruct SourceOutputData<'a> {\n key: value::TypedValue<'a>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n exports: Option>>,\n\n #[serde(skip_serializing_if = \"Option::is_none\")]\n error: Option,\n}\n\nstruct Dumper<'a> {\n plan: &'a ExecutionPlan,\n setup_execution_ctx: &'a exec_ctx::FlowSetupExecutionContext,\n schema: &'a schema::FlowSchema,\n pool: &'a PgPool,\n options: EvaluateAndDumpOptions,\n}\n\nimpl<'a> Dumper<'a> {\n async fn evaluate_source_entry<'b>(\n &'a self,\n import_op_idx: usize,\n import_op: &'a AnalyzedImportOp,\n key: &value::KeyValue,\n collected_values_buffer: &'b mut 
Vec>,\n ) -> Result>>>\n where\n 'a: 'b,\n {\n let data_builder = row_indexer::evaluate_source_entry_with_memory(\n &SourceRowEvaluationContext {\n plan: self.plan,\n import_op,\n schema: self.schema,\n key,\n import_op_idx,\n },\n self.setup_execution_ctx,\n EvaluationMemoryOptions {\n enable_cache: self.options.use_cache,\n evaluation_only: true,\n },\n self.pool,\n )\n .await?;\n\n let data_builder = if let Some(data_builder) = data_builder {\n data_builder\n } else {\n return Ok(None);\n };\n\n *collected_values_buffer = data_builder.collected_values;\n let exports = self\n .plan\n .export_ops\n .iter()\n .map(|export_op| -> Result<_> {\n let collector_idx = export_op.input.collector_idx as usize;\n let entry = (\n export_op.name.as_str(),\n TargetExportData {\n schema: &self.schema.root_op_scope.collectors[collector_idx]\n .spec\n .fields,\n data: collected_values_buffer[collector_idx]\n .iter()\n .map(|v| -> Result<_> {\n let key = row_indexer::extract_primary_key(\n &export_op.primary_key_def,\n v,\n )?;\n Ok((key, v))\n })\n .collect::>()?,\n },\n );\n Ok(entry)\n })\n .collect::>()?;\n Ok(Some(exports))\n }\n\n async fn evaluate_and_dump_source_entry(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n key: value::KeyValue,\n file_path: PathBuf,\n ) -> Result<()> {\n let _permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n let mut collected_values_buffer = Vec::new();\n let (exports, error) = match self\n .evaluate_source_entry(import_op_idx, import_op, &key, &mut collected_values_buffer)\n .await\n {\n Ok(exports) => (exports, None),\n Err(e) => (None, Some(format!(\"{e:?}\"))),\n };\n let key_value = value::Value::from(key);\n let file_data = SourceOutputData {\n key: value::TypedValue {\n t: &import_op.primary_key_type,\n v: &key_value,\n },\n exports,\n error,\n };\n\n let yaml_output = {\n let mut yaml_output = String::new();\n let yaml_data = YamlSerializer::serialize(&file_data)?;\n 
let mut yaml_emitter = YamlEmitter::new(&mut yaml_output);\n yaml_emitter.multiline_strings(true);\n yaml_emitter.compact(true);\n yaml_emitter.dump(&yaml_data)?;\n yaml_output\n };\n tokio::fs::write(file_path, yaml_output).await?;\n\n Ok(())\n }\n\n async fn evaluate_and_dump_for_source(\n &self,\n import_op_idx: usize,\n import_op: &AnalyzedImportOp,\n ) -> Result<()> {\n let mut keys_by_filename_prefix: IndexMap> = IndexMap::new();\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n while let Some(rows) = rows_stream.next().await {\n for row in rows?.into_iter() {\n let mut s = row\n .key\n .to_strs()\n .into_iter()\n .map(|s| urlencoding::encode(&s).into_owned())\n .join(\":\");\n s.truncate(\n (0..(FILENAME_PREFIX_MAX_LENGTH - import_op.name.as_str().len()))\n .rev()\n .find(|i| s.is_char_boundary(*i))\n .unwrap_or(0),\n );\n keys_by_filename_prefix.entry(s).or_default().push(row.key);\n }\n }\n let output_dir = Path::new(&self.options.output_dir);\n let evaluate_futs =\n keys_by_filename_prefix\n .into_iter()\n .flat_map(|(filename_prefix, keys)| {\n let num_keys = keys.len();\n keys.into_iter().enumerate().map(move |(i, key)| {\n let extra_id = if num_keys > 1 {\n Cow::Owned(format!(\".{i}\"))\n } else {\n Cow::Borrowed(\"\")\n };\n let file_name =\n format!(\"{}@{}{}.yaml\", import_op.name, filename_prefix, extra_id);\n let file_path = output_dir.join(Path::new(&file_name));\n self.evaluate_and_dump_source_entry(\n import_op_idx,\n import_op,\n key,\n file_path,\n )\n })\n });\n try_join_all(evaluate_futs).await?;\n Ok(())\n }\n\n async fn evaluate_and_dump(&self) -> Result<()> {\n try_join_all(\n self.plan\n .import_ops\n .iter()\n .enumerate()\n .map(|(idx, import_op)| self.evaluate_and_dump_for_source(idx, import_op)),\n )\n .await?;\n Ok(())\n }\n}\n\npub async fn evaluate_and_dump(\n plan: &ExecutionPlan,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n schema: 
&schema::FlowSchema,\n options: EvaluateAndDumpOptions,\n pool: &PgPool,\n) -> Result<()> {\n let output_dir = Path::new(&options.output_dir);\n if output_dir.exists() {\n if !output_dir.is_dir() {\n return Err(anyhow::anyhow!(\"The path exists and is not a directory\"));\n }\n } else {\n tokio::fs::create_dir(output_dir).await?;\n }\n\n let dumper = Dumper {\n plan,\n setup_execution_ctx,\n schema,\n pool,\n options,\n };\n dumper.evaluate_and_dump().await\n}\n"], ["/cocoindex/src/ops/sources/amazon_s3.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse aws_config::BehaviorVersion;\nuse aws_sdk_s3::Client;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\nuse urlencoding;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n/// Decode a form-encoded URL string, treating '+' as spaces\nfn decode_form_encoded_url(input: &str) -> Result> {\n // Replace '+' with spaces (form encoding convention), then decode\n // This handles both cases correctly:\n // - Literal '+' would be encoded as '%2B' and remain unchanged after replacement\n // - Space would be encoded as '+' and become ' ' after replacement\n let with_spaces = input.replace(\"+\", \" \");\n Ok(urlencoding::decode(&with_spaces)?.into())\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n sqs_queue_url: Option,\n}\n\nstruct SqsContext {\n client: aws_sdk_sqs::Client,\n queue_url: String,\n}\n\nimpl SqsContext {\n async fn delete_message(&self, receipt_handle: String) -> Result<()> {\n self.client\n .delete_message()\n .queue_url(&self.queue_url)\n .receipt_handle(receipt_handle)\n .send()\n .await?;\n Ok(())\n }\n}\n\nstruct Executor {\n client: Client,\n bucket_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n sqs_context: Option>,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> 
bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &aws_sdk_s3::primitives::DateTime) -> Ordinal {\n Ordinal(Some((dt.as_nanos() / 1000) as i64))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token = None;\n loop {\n let mut req = self.client\n .list_objects_v2()\n .bucket(&self.bucket_name);\n if let Some(ref p) = self.prefix {\n req = req.prefix(p);\n }\n if let Some(ref token) = continuation_token {\n req = req.continuation_token(token);\n }\n let resp = req.send().await?;\n if let Some(contents) = &resp.contents {\n let mut batch = Vec::new();\n for obj in contents {\n if let Some(key) = obj.key() {\n // Only include files (not folders)\n if key.ends_with('/') { continue; }\n let include = self.included_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(true);\n let exclude = self.excluded_glob_set\n .as_ref()\n .map(|gs| gs.is_match(key))\n .unwrap_or(false);\n if include && !exclude {\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.to_string().into()),\n ordinal: obj.last_modified().map(datetime_to_ordinal),\n });\n }\n }\n }\n if !batch.is_empty() {\n yield batch;\n }\n }\n if resp.is_truncated == Some(true) {\n continuation_token = resp.next_continuation_token.clone().map(|s| s.to_string());\n } else {\n break;\n }\n }\n }.boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n 
}\n let resp = self\n .client\n .get_object()\n .bucket(&self.bucket_name)\n .key(key_str.as_ref())\n .send()\n .await;\n let obj = match resp {\n Err(e) if e.as_service_error().is_some_and(|e| e.is_no_such_key()) => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n r => r?,\n };\n let ordinal = if options.include_ordinal {\n obj.last_modified().map(datetime_to_ordinal)\n } else {\n None\n };\n let value = if options.include_value {\n let bytes = obj.body.collect().await?.into_bytes();\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes.to_vec())\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let sqs_context = if let Some(sqs_context) = &self.sqs_context {\n sqs_context\n } else {\n return Ok(None);\n };\n let stream = stream! {\n loop {\n match self.poll_sqs(sqs_context).await {\n Ok(messages) => {\n for message in messages {\n yield Ok(message);\n }\n }\n Err(e) => {\n yield Err(e);\n }\n };\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventNotification {\n #[serde(default, rename = \"Records\")]\n pub records: Vec,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3EventRecord {\n #[serde(rename = \"eventName\")]\n pub event_name: String,\n pub s3: Option,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Entity {\n pub bucket: S3Bucket,\n pub object: S3Object,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Bucket {\n pub name: String,\n}\n\n#[derive(Debug, Deserialize)]\npub struct S3Object {\n pub key: String,\n}\n\nimpl Executor {\n async fn poll_sqs(&self, sqs_context: &Arc) -> Result> {\n let resp = sqs_context\n .client\n .receive_message()\n .queue_url(&sqs_context.queue_url)\n .max_number_of_messages(10)\n .wait_time_seconds(20)\n .send()\n .await?;\n 
let messages = if let Some(messages) = resp.messages {\n messages\n } else {\n return Ok(Vec::new());\n };\n let mut change_messages = vec![];\n for message in messages.into_iter() {\n if let Some(body) = message.body {\n let notification: S3EventNotification = serde_json::from_str(&body)?;\n let mut changes = vec![];\n for record in notification.records {\n let s3 = if let Some(s3) = record.s3 {\n s3\n } else {\n continue;\n };\n if s3.bucket.name != self.bucket_name {\n continue;\n }\n if !self\n .prefix\n .as_ref()\n .is_none_or(|prefix| s3.object.key.starts_with(prefix))\n {\n continue;\n }\n if record.event_name.starts_with(\"ObjectCreated:\")\n || record.event_name.starts_with(\"ObjectRemoved:\")\n {\n let decoded_key = decode_form_encoded_url(&s3.object.key)?;\n changes.push(SourceChange {\n key: KeyValue::Str(decoded_key),\n data: None,\n });\n }\n }\n if let Some(receipt_handle) = message.receipt_handle {\n if !changes.is_empty() {\n let sqs_context = sqs_context.clone();\n change_messages.push(SourceChangeMessage {\n changes,\n ack_fn: Some(Box::new(move || {\n async move { sqs_context.delete_message(receipt_handle).await }\n .boxed()\n })),\n });\n } else {\n sqs_context.delete_message(receipt_handle).await?;\n }\n }\n }\n }\n Ok(change_messages)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AmazonS3\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n 
serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n let config = aws_config::load_defaults(BehaviorVersion::latest()).await;\n Ok(Box::new(Executor {\n client: Client::new(&config),\n bucket_name: spec.bucket_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n sqs_context: spec.sqs_queue_url.map(|url| {\n Arc::new(SqsContext {\n client: aws_sdk_sqs::Client::new(&config),\n queue_url: url,\n })\n }),\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/ops/sources/google_drive.rs", "use chrono::Duration;\nuse google_drive3::{\n DriveHub,\n api::{File, Scope},\n yup_oauth2::{ServiceAccountAuthenticator, read_service_account_key},\n};\nuse http_body_util::BodyExt;\nuse hyper_rustls::HttpsConnector;\nuse hyper_util::client::legacy::connect::HttpConnector;\nuse phf::phf_map;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\nstruct ExportMimeType {\n text: &'static str,\n binary: &'static str,\n}\n\nconst FOLDER_MIME_TYPE: &str = \"application/vnd.google-apps.folder\";\nconst FILE_MIME_TYPE: &str = \"application/vnd.google-apps.file\";\nstatic EXPORT_MIME_TYPES: phf::Map<&'static str, ExportMimeType> = phf_map! 
{\n \"application/vnd.google-apps.document\" =>\n ExportMimeType {\n text: \"text/markdown\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.spreadsheet\" =>\n ExportMimeType {\n text: \"text/csv\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.presentation\" =>\n ExportMimeType {\n text: \"text/plain\",\n binary: \"application/pdf\",\n },\n \"application/vnd.google-apps.drawing\" =>\n ExportMimeType {\n text: \"image/svg+xml\",\n binary: \"image/png\",\n },\n \"application/vnd.google-apps.script\" =>\n ExportMimeType {\n text: \"application/vnd.google-apps.script+json\",\n binary: \"application/vnd.google-apps.script+json\",\n },\n};\n\nfn is_supported_file_type(mime_type: &str) -> bool {\n !mime_type.starts_with(\"application/vnd.google-apps.\")\n || EXPORT_MIME_TYPES.contains_key(mime_type)\n || mime_type == FILE_MIME_TYPE\n}\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n service_account_credential_path: String,\n binary: bool,\n root_folder_ids: Vec,\n recent_changes_poll_interval: Option,\n}\n\nstruct Executor {\n drive_hub: DriveHub>,\n binary: bool,\n root_folder_ids: IndexSet>,\n recent_updates_poll_interval: Option,\n}\n\nimpl Executor {\n async fn new(spec: Spec) -> Result {\n let service_account_key =\n read_service_account_key(spec.service_account_credential_path).await?;\n let auth = ServiceAccountAuthenticator::builder(service_account_key)\n .build()\n .await?;\n let client =\n hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())\n .build(\n hyper_rustls::HttpsConnectorBuilder::new()\n .with_provider_and_native_roots(\n rustls::crypto::aws_lc_rs::default_provider(),\n )?\n .https_only()\n .enable_http2()\n .build(),\n );\n let drive_hub = DriveHub::new(client, auth);\n Ok(Self {\n drive_hub,\n binary: spec.binary,\n root_folder_ids: spec.root_folder_ids.into_iter().map(Arc::from).collect(),\n recent_updates_poll_interval: spec.recent_changes_poll_interval,\n })\n }\n}\n\nfn 
escape_string(s: &str) -> String {\n let mut escaped = String::with_capacity(s.len());\n for c in s.chars() {\n match c {\n '\\'' | '\\\\' => escaped.push('\\\\'),\n _ => {}\n }\n escaped.push(c);\n }\n escaped\n}\n\nconst CUTOFF_TIME_BUFFER: Duration = Duration::seconds(1);\nimpl Executor {\n fn visit_file(\n &self,\n file: File,\n new_folder_ids: &mut Vec>,\n seen_ids: &mut HashSet>,\n ) -> Result> {\n if file.trashed == Some(true) {\n return Ok(None);\n }\n let (id, mime_type) = match (file.id, file.mime_type) {\n (Some(id), Some(mime_type)) => (Arc::::from(id), mime_type),\n (id, mime_type) => {\n warn!(\"Skipping file with incomplete metadata: id={id:?}, mime_type={mime_type:?}\",);\n return Ok(None);\n }\n };\n if !seen_ids.insert(id.clone()) {\n return Ok(None);\n }\n let result = if mime_type == FOLDER_MIME_TYPE {\n new_folder_ids.push(id);\n None\n } else if is_supported_file_type(&mime_type) {\n Some(PartialSourceRowMetadata {\n key: KeyValue::Str(id),\n ordinal: file.modified_time.map(|t| t.try_into()).transpose()?,\n })\n } else {\n None\n };\n Ok(result)\n }\n\n async fn list_files(\n &self,\n folder_id: &str,\n fields: &str,\n next_page_token: &mut Option,\n ) -> Result> {\n let query = format!(\"'{}' in parents\", escape_string(folder_id));\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .q(&query)\n .param(\"fields\", fields);\n if let Some(next_page_token) = &next_page_token {\n list_call = list_call.page_token(next_page_token);\n }\n let (_, files) = list_call.doit().await?;\n *next_page_token = files.next_page_token;\n let file_iter = files.files.into_iter().flat_map(|file| file.into_iter());\n Ok(file_iter)\n }\n\n fn make_cutoff_time(\n most_recent_modified_time: Option>,\n list_start_time: DateTime,\n ) -> DateTime {\n let safe_upperbound = list_start_time - CUTOFF_TIME_BUFFER;\n most_recent_modified_time\n .map(|t| t.min(safe_upperbound))\n .unwrap_or(safe_upperbound)\n }\n\n async fn 
get_recent_updates(\n &self,\n cutoff_time: &mut DateTime,\n ) -> Result {\n let mut page_size: i32 = 10;\n let mut next_page_token: Option = None;\n let mut changes = Vec::new();\n let mut most_recent_modified_time = None;\n let start_time = Utc::now();\n 'paginate: loop {\n let mut list_call = self\n .drive_hub\n .files()\n .list()\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"files(id,modifiedTime,parents,trashed)\")\n .order_by(\"modifiedTime desc\")\n .page_size(page_size);\n if let Some(token) = next_page_token {\n list_call = list_call.page_token(token.as_str());\n }\n let (_, files) = list_call.doit().await?;\n for file in files.files.into_iter().flat_map(|files| files.into_iter()) {\n let modified_time = file.modified_time.unwrap_or_default();\n if most_recent_modified_time.is_none() {\n most_recent_modified_time = Some(modified_time);\n }\n if modified_time <= *cutoff_time {\n break 'paginate;\n }\n let file_id = file.id.ok_or_else(|| anyhow!(\"File has no id\"))?;\n if self.is_file_covered(&file_id).await? 
{\n changes.push(SourceChange {\n key: KeyValue::Str(Arc::from(file_id)),\n data: None,\n });\n }\n }\n if let Some(token) = files.next_page_token {\n next_page_token = Some(token);\n } else {\n break;\n }\n // List more in a page since 2nd.\n page_size = 100;\n }\n *cutoff_time = Self::make_cutoff_time(most_recent_modified_time, start_time);\n Ok(SourceChangeMessage {\n changes,\n ack_fn: None,\n })\n }\n\n async fn is_file_covered(&self, file_id: &str) -> Result {\n let mut next_file_id = Some(Cow::Borrowed(file_id));\n while let Some(file_id) = next_file_id {\n if self.root_folder_ids.contains(file_id.as_ref()) {\n return Ok(true);\n }\n let (_, file) = self\n .drive_hub\n .files()\n .get(&file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", \"parents\")\n .doit()\n .await?;\n next_file_id = file\n .parents\n .into_iter()\n .flat_map(|parents| parents.into_iter())\n .map(Cow::Owned)\n .next();\n }\n Ok(false)\n }\n}\n\ntrait ResultExt {\n type OptResult;\n fn or_not_found(self) -> Self::OptResult;\n}\n\nimpl ResultExt for google_drive3::Result {\n type OptResult = google_drive3::Result>;\n\n fn or_not_found(self) -> Self::OptResult {\n match self {\n Ok(value) => Ok(Some(value)),\n Err(google_drive3::Error::BadRequest(err_msg))\n if err_msg\n .get(\"error\")\n .and_then(|e| e.get(\"code\"))\n .and_then(|code| code.as_i64())\n == Some(404) =>\n {\n Ok(None)\n }\n Err(e) => Err(e),\n }\n }\n}\n\nfn optional_modified_time(include_ordinal: bool) -> &'static str {\n if include_ordinal { \",modifiedTime\" } else { \"\" }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let mut seen_ids = HashSet::new();\n let mut folder_ids = self.root_folder_ids.clone();\n let fields = format!(\n \"files(id,name,mimeType,trashed{})\",\n optional_modified_time(options.include_ordinal)\n );\n let mut new_folder_ids = Vec::new();\n try_stream! 
{\n while let Some(folder_id) = folder_ids.pop() {\n let mut next_page_token = None;\n loop {\n let mut curr_rows = Vec::new();\n let files = self\n .list_files(&folder_id, &fields, &mut next_page_token)\n .await?;\n for file in files {\n curr_rows.extend(self.visit_file(file, &mut new_folder_ids, &mut seen_ids)?);\n }\n if !curr_rows.is_empty() {\n yield curr_rows;\n }\n if next_page_token.is_none() {\n break;\n }\n }\n folder_ids.extend(new_folder_ids.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n let file_id = key.str_value()?;\n let fields = format!(\n \"id,name,mimeType,trashed{}\",\n optional_modified_time(options.include_ordinal)\n );\n let resp = self\n .drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"fields\", &fields)\n .doit()\n .await\n .or_not_found()?;\n let file = match resp {\n Some((_, file)) if file.trashed != Some(true) => file,\n _ => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n let ordinal = if options.include_ordinal {\n file.modified_time.map(|t| t.try_into()).transpose()?\n } else {\n None\n };\n let type_n_body = if let Some(export_mime_type) = file\n .mime_type\n .as_ref()\n .and_then(|mime_type| EXPORT_MIME_TYPES.get(mime_type.as_str()))\n {\n let target_mime_type = if self.binary {\n export_mime_type.binary\n } else {\n export_mime_type.text\n };\n self.drive_hub\n .files()\n .export(file_id, target_mime_type)\n .add_scope(Scope::Readonly)\n .doit()\n .await\n .or_not_found()?\n .map(|content| (Some(target_mime_type.to_string()), content.into_body()))\n } else {\n self.drive_hub\n .files()\n .get(file_id)\n .add_scope(Scope::Readonly)\n .param(\"alt\", \"media\")\n .doit()\n .await\n .or_not_found()?\n .map(|(resp, _)| (file.mime_type, resp.into_body()))\n };\n let value = match type_n_body {\n Some((mime_type, 
resp_body)) => {\n let content = resp_body.collect().await?;\n\n let fields = vec![\n file.name.unwrap_or_default().into(),\n mime_type.into(),\n if self.binary {\n content.to_bytes().to_vec().into()\n } else {\n String::from_utf8_lossy(&content.to_bytes())\n .to_string()\n .into()\n },\n ];\n Some(SourceValue::Existence(FieldValues { fields }))\n }\n None => None,\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n let poll_interval = if let Some(poll_interval) = self.recent_updates_poll_interval {\n poll_interval\n } else {\n return Ok(None);\n };\n let mut cutoff_time = Utc::now() - CUTOFF_TIME_BUFFER;\n let mut interval = tokio::time::interval(poll_interval);\n interval.tick().await;\n let stream = stream! {\n loop {\n interval.tick().await;\n yield self.get_recent_updates(&mut cutoff_time).await;\n }\n };\n Ok(Some(stream.boxed()))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"GoogleDrive\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"file_id\",\n make_output_type(BasicValueType::Str),\n ));\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n let mime_type_field = schema_builder.add_field(FieldSchema::new(\n \"mime_type\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n )\n .with_attr(\n field_attrs::CONTENT_MIME_TYPE,\n 
serde_json::to_value(mime_type_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec).await?))\n }\n}\n"], ["/cocoindex/src/base/json_schema.rs", "use crate::prelude::*;\n\nuse crate::utils::immutable::RefList;\nuse schemars::schema::{\n ArrayValidation, InstanceType, ObjectValidation, Schema, SchemaObject, SingleOrVec,\n SubschemaValidation,\n};\nuse std::fmt::Write;\n\npub struct ToJsonSchemaOptions {\n /// If true, mark all fields as required.\n /// Use union type (with `null`) for optional fields instead.\n /// Models like OpenAI will reject the schema if a field is not required.\n pub fields_always_required: bool,\n\n /// If true, the JSON schema supports the `format` keyword.\n pub supports_format: bool,\n\n /// If true, extract descriptions to a separate extra instruction.\n pub extract_descriptions: bool,\n\n /// If true, the top level must be a JSON object.\n pub top_level_must_be_object: bool,\n}\n\nstruct JsonSchemaBuilder {\n options: ToJsonSchemaOptions,\n extra_instructions_per_field: IndexMap,\n}\n\nimpl JsonSchemaBuilder {\n fn new(options: ToJsonSchemaOptions) -> Self {\n Self {\n options,\n extra_instructions_per_field: IndexMap::new(),\n }\n }\n\n fn set_description(\n &mut self,\n schema: &mut SchemaObject,\n description: impl ToString,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) {\n if self.options.extract_descriptions {\n let mut fields: Vec<_> = field_path.iter().map(|f| f.as_str()).collect();\n fields.reverse();\n self.extra_instructions_per_field\n .insert(fields.join(\".\"), description.to_string());\n } else {\n schema.metadata.get_or_insert_default().description = Some(description.to_string());\n }\n }\n\n fn for_basic_value_type(\n &mut self,\n basic_type: &schema::BasicValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> 
SchemaObject {\n let mut schema = SchemaObject::default();\n match basic_type {\n schema::BasicValueType::Str => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bytes => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n }\n schema::BasicValueType::Bool => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Boolean)));\n }\n schema::BasicValueType::Int64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Integer)));\n }\n schema::BasicValueType::Float32 | schema::BasicValueType::Float64 => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Number)));\n }\n schema::BasicValueType::Range => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(\n InstanceType::Integer,\n ))),\n ..Default::default()\n }\n .into(),\n ))),\n min_items: Some(2),\n max_items: Some(2),\n ..Default::default()\n }));\n self.set_description(\n &mut schema,\n \"A range represented by a list of two positions, start pos (inclusive), end pos (exclusive).\",\n field_path,\n );\n }\n schema::BasicValueType::Uuid => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"uuid\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A UUID, e.g. 123e4567-e89b-12d3-a456-426614174000\",\n field_path,\n );\n }\n schema::BasicValueType::Date => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A date in YYYY-MM-DD format, e.g. 
2025-03-27\",\n field_path,\n );\n }\n schema::BasicValueType::Time => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A time in HH:MM:SS format, e.g. 13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::LocalDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time without timezone offset in YYYY-MM-DDTHH:MM:SS format, e.g. 2025-03-27T13:32:12\",\n field_path,\n );\n }\n schema::BasicValueType::OffsetDateTime => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"date-time\".to_string());\n }\n self.set_description(\n &mut schema,\n \"Date time with timezone offset in RFC3339, e.g. 2025-03-27T13:32:12Z, 2025-03-27T07:32:12.313-06:00\",\n field_path,\n );\n }\n &schema::BasicValueType::TimeDelta => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::String)));\n if self.options.supports_format {\n schema.format = Some(\"duration\".to_string());\n }\n self.set_description(\n &mut schema,\n \"A duration, e.g. 'PT1H2M3S' (ISO 8601) or '1 day 2 hours 3 seconds'\",\n field_path,\n );\n }\n schema::BasicValueType::Json => {\n // Can be any value. 
No type constraint.\n }\n schema::BasicValueType::Vector(s) => {\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Array)));\n schema.array = Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_basic_value_type(&s.element_type, field_path)\n .into(),\n ))),\n min_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n max_items: s.dimension.and_then(|d| u32::try_from(d).ok()),\n ..Default::default()\n }));\n }\n schema::BasicValueType::Union(s) => {\n schema.subschemas = Some(Box::new(SubschemaValidation {\n one_of: Some(\n s.types\n .iter()\n .map(|t| Schema::Object(self.for_basic_value_type(t, field_path)))\n .collect(),\n ),\n ..Default::default()\n }));\n }\n }\n schema\n }\n\n fn for_struct_schema(\n &mut self,\n struct_schema: &schema::StructSchema,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n let mut schema = SchemaObject::default();\n if let Some(description) = &struct_schema.description {\n self.set_description(&mut schema, description, field_path);\n }\n schema.instance_type = Some(SingleOrVec::Single(Box::new(InstanceType::Object)));\n schema.object = Some(Box::new(ObjectValidation {\n properties: struct_schema\n .fields\n .iter()\n .map(|f| {\n let mut schema =\n self.for_enriched_value_type(&f.value_type, field_path.prepend(&f.name));\n if self.options.fields_always_required && f.value_type.nullable {\n if let Some(instance_type) = &mut schema.instance_type {\n let mut types = match instance_type {\n SingleOrVec::Single(t) => vec![**t],\n SingleOrVec::Vec(t) => std::mem::take(t),\n };\n types.push(InstanceType::Null);\n *instance_type = SingleOrVec::Vec(types);\n }\n }\n (f.name.to_string(), schema.into())\n })\n .collect(),\n required: struct_schema\n .fields\n .iter()\n .filter(|&f| (self.options.fields_always_required || !f.value_type.nullable))\n .map(|f| f.name.to_string())\n .collect(),\n additional_properties: Some(Schema::Bool(false).into()),\n 
..Default::default()\n }));\n schema\n }\n\n fn for_value_type(\n &mut self,\n value_type: &schema::ValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n match value_type {\n schema::ValueType::Basic(b) => self.for_basic_value_type(b, field_path),\n schema::ValueType::Struct(s) => self.for_struct_schema(s, field_path),\n schema::ValueType::Table(c) => SchemaObject {\n instance_type: Some(SingleOrVec::Single(Box::new(InstanceType::Array))),\n array: Some(Box::new(ArrayValidation {\n items: Some(SingleOrVec::Single(Box::new(\n self.for_struct_schema(&c.row, field_path).into(),\n ))),\n ..Default::default()\n })),\n ..Default::default()\n },\n }\n }\n\n fn for_enriched_value_type(\n &mut self,\n enriched_value_type: &schema::EnrichedValueType,\n field_path: RefList<'_, &'_ spec::FieldName>,\n ) -> SchemaObject {\n self.for_value_type(&enriched_value_type.typ, field_path)\n }\n\n fn build_extra_instructions(&self) -> Result> {\n if self.extra_instructions_per_field.is_empty() {\n return Ok(None);\n }\n\n let mut instructions = String::new();\n write!(&mut instructions, \"Instructions for specific fields:\\n\\n\")?;\n for (field_path, instruction) in self.extra_instructions_per_field.iter() {\n write!(\n &mut instructions,\n \"- {}: {}\\n\\n\",\n if field_path.is_empty() {\n \"(root object)\"\n } else {\n field_path.as_str()\n },\n instruction\n )?;\n }\n Ok(Some(instructions))\n }\n}\n\npub struct ValueExtractor {\n value_type: schema::ValueType,\n object_wrapper_field_name: Option,\n}\n\nimpl ValueExtractor {\n pub fn extract_value(&self, json_value: serde_json::Value) -> Result {\n let unwrapped_json_value =\n if let Some(object_wrapper_field_name) = &self.object_wrapper_field_name {\n match json_value {\n serde_json::Value::Object(mut o) => o\n .remove(object_wrapper_field_name)\n .unwrap_or(serde_json::Value::Null),\n _ => {\n bail!(\"Field `{}` not found\", object_wrapper_field_name)\n }\n }\n } else {\n json_value\n };\n let result = 
value::Value::from_json(unwrapped_json_value, &self.value_type)?;\n Ok(result)\n }\n}\n\npub struct BuildJsonSchemaOutput {\n pub schema: SchemaObject,\n pub extra_instructions: Option,\n pub value_extractor: ValueExtractor,\n}\n\npub fn build_json_schema(\n value_type: schema::EnrichedValueType,\n options: ToJsonSchemaOptions,\n) -> Result {\n let mut builder = JsonSchemaBuilder::new(options);\n let (schema, object_wrapper_field_name) = if builder.options.top_level_must_be_object\n && !matches!(value_type.typ, schema::ValueType::Struct(_))\n {\n let object_wrapper_field_name = \"value\".to_string();\n let wrapper_struct = schema::StructSchema {\n fields: Arc::new(vec![schema::FieldSchema {\n name: object_wrapper_field_name.clone(),\n value_type: value_type.clone(),\n }]),\n description: None,\n };\n (\n builder.for_struct_schema(&wrapper_struct, RefList::Nil),\n Some(object_wrapper_field_name),\n )\n } else {\n (\n builder.for_enriched_value_type(&value_type, RefList::Nil),\n None,\n )\n };\n Ok(BuildJsonSchemaOutput {\n schema,\n extra_instructions: builder.build_extra_instructions()?,\n value_extractor: ValueExtractor {\n value_type: value_type.typ,\n object_wrapper_field_name,\n },\n })\n}\n"], ["/cocoindex/src/execution/memoization.rs", "use anyhow::{Result, bail};\nuse serde::{Deserialize, Serialize};\nuse std::{\n borrow::Cow,\n collections::HashMap,\n future::Future,\n sync::{Arc, Mutex},\n};\n\nuse crate::{\n base::{schema, value},\n service::error::{SharedError, SharedResultExtRef},\n utils::fingerprint::{Fingerprint, Fingerprinter},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StoredCacheEntry {\n time_sec: i64,\n value: serde_json::Value,\n}\n#[derive(Debug, Clone, Serialize, Deserialize, Default)]\npub struct StoredMemoizationInfo {\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub cache: HashMap,\n\n #[serde(default, skip_serializing_if = \"HashMap::is_empty\")]\n pub uuids: HashMap>,\n\n #[serde(default, 
skip_serializing_if = \"Option::is_none\")]\n pub content_hash: Option,\n}\n\npub type CacheEntryCell = Arc>>;\nenum CacheData {\n /// Existing entry in previous runs, but not in current run yet.\n Previous(serde_json::Value),\n /// Value appeared in current run.\n Current(CacheEntryCell),\n}\n\nstruct CacheEntry {\n time: chrono::DateTime,\n data: CacheData,\n}\n\n#[derive(Default)]\nstruct UuidEntry {\n uuids: Vec,\n num_current: usize,\n}\n\nimpl UuidEntry {\n fn new(uuids: Vec) -> Self {\n Self {\n uuids,\n num_current: 0,\n }\n }\n\n fn into_stored(self) -> Option> {\n if self.num_current == 0 {\n return None;\n }\n let mut uuids = self.uuids;\n if self.num_current < uuids.len() {\n uuids.truncate(self.num_current);\n }\n Some(uuids)\n }\n}\n\npub struct EvaluationMemoryOptions {\n pub enable_cache: bool,\n\n /// If true, it's for evaluation only.\n /// In this mode, we don't memoize anything.\n pub evaluation_only: bool,\n}\n\npub struct EvaluationMemory {\n current_time: chrono::DateTime,\n cache: Option>>,\n uuids: Mutex>,\n evaluation_only: bool,\n}\n\nimpl EvaluationMemory {\n pub fn new(\n current_time: chrono::DateTime,\n stored_info: Option,\n options: EvaluationMemoryOptions,\n ) -> Self {\n let (stored_cache, stored_uuids) = stored_info\n .map(|stored_info| (stored_info.cache, stored_info.uuids))\n .unzip();\n Self {\n current_time,\n cache: options.enable_cache.then(|| {\n Mutex::new(\n stored_cache\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, e)| {\n (\n k,\n CacheEntry {\n time: chrono::DateTime::from_timestamp(e.time_sec, 0)\n .unwrap_or(chrono::DateTime::::MIN_UTC),\n data: CacheData::Previous(e.value),\n },\n )\n })\n .collect(),\n )\n }),\n uuids: Mutex::new(\n (!options.evaluation_only)\n .then_some(stored_uuids)\n .flatten()\n .into_iter()\n .flat_map(|iter| iter.into_iter())\n .map(|(k, v)| (k, UuidEntry::new(v)))\n .collect(),\n ),\n evaluation_only: options.evaluation_only,\n }\n }\n\n pub fn into_stored(self) -> Result 
{\n if self.evaluation_only {\n bail!(\"For evaluation only, cannot convert to stored MemoizationInfo\");\n }\n let cache = if let Some(cache) = self.cache {\n cache\n .into_inner()?\n .into_iter()\n .filter_map(|(k, e)| match e.data {\n CacheData::Previous(_) => None,\n CacheData::Current(entry) => match entry.get() {\n Some(Ok(v)) => Some(serde_json::to_value(v).map(|value| {\n (\n k,\n StoredCacheEntry {\n time_sec: e.time.timestamp(),\n value,\n },\n )\n })),\n _ => None,\n },\n })\n .collect::>()?\n } else {\n bail!(\"Cache is disabled, cannot convert to stored MemoizationInfo\");\n };\n let uuids = self\n .uuids\n .into_inner()?\n .into_iter()\n .filter_map(|(k, v)| v.into_stored().map(|uuids| (k, uuids)))\n .collect();\n Ok(StoredMemoizationInfo {\n cache,\n uuids,\n content_hash: None,\n })\n }\n\n pub fn get_cache_entry(\n &self,\n key: impl FnOnce() -> Result,\n typ: &schema::ValueType,\n ttl: Option,\n ) -> Result> {\n let mut cache = if let Some(cache) = &self.cache {\n cache.lock().unwrap()\n } else {\n return Ok(None);\n };\n let result = match cache.entry(key()?) 
{\n std::collections::hash_map::Entry::Occupied(mut entry)\n if !ttl\n .map(|ttl| entry.get().time + ttl < self.current_time)\n .unwrap_or(false) =>\n {\n let entry_mut = &mut entry.get_mut();\n match &mut entry_mut.data {\n CacheData::Previous(value) => {\n let value = value::Value::from_json(std::mem::take(value), typ)?;\n let cell = Arc::new(tokio::sync::OnceCell::from(Ok(value)));\n let time = entry_mut.time;\n entry.insert(CacheEntry {\n time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n CacheData::Current(cell) => cell.clone(),\n }\n }\n entry => {\n let cell = Arc::new(tokio::sync::OnceCell::new());\n entry.insert_entry(CacheEntry {\n time: self.current_time,\n data: CacheData::Current(cell.clone()),\n });\n cell\n }\n };\n Ok(Some(result))\n }\n\n pub fn next_uuid(&self, key: Fingerprint) -> Result {\n let mut uuids = self.uuids.lock().unwrap();\n\n let entry = uuids.entry(key).or_default();\n let uuid = if self.evaluation_only {\n let fp = Fingerprinter::default()\n .with(&key)?\n .with(&entry.num_current)?\n .into_fingerprint();\n uuid::Uuid::new_v8(fp.0)\n } else if entry.num_current < entry.uuids.len() {\n entry.uuids[entry.num_current]\n } else {\n let uuid = uuid::Uuid::new_v4();\n entry.uuids.push(uuid);\n uuid\n };\n entry.num_current += 1;\n Ok(uuid)\n }\n}\n\npub async fn evaluate_with_cell(\n cell: Option<&CacheEntryCell>,\n compute: impl FnOnce() -> Fut,\n) -> Result>\nwhere\n Fut: Future>,\n{\n let result = match cell {\n Some(cell) => Cow::Borrowed(\n cell.get_or_init(|| {\n let fut = compute();\n async move { fut.await.map_err(SharedError::new) }\n })\n .await\n .std_result()?,\n ),\n None => Cow::Owned(compute().await?),\n };\n Ok(result)\n}\n"], ["/cocoindex/src/execution/live_updater.rs", "use crate::{execution::stats::UpdateStats, prelude::*};\n\nuse super::stats;\nuse futures::future::try_join_all;\nuse sqlx::PgPool;\nuse tokio::{sync::watch, task::JoinSet, time::MissedTickBehavior};\n\npub struct FlowLiveUpdaterUpdates 
{\n pub active_sources: Vec,\n pub updated_sources: Vec,\n}\nstruct FlowLiveUpdaterStatus {\n pub active_source_idx: BTreeSet,\n pub source_updates_num: Vec,\n}\n\nstruct UpdateReceiveState {\n status_rx: watch::Receiver,\n last_num_source_updates: Vec,\n is_done: bool,\n}\n\npub struct FlowLiveUpdater {\n flow_ctx: Arc,\n join_set: Mutex>>>,\n stats_per_task: Vec>,\n recv_state: tokio::sync::Mutex,\n num_remaining_tasks_rx: watch::Receiver,\n\n // Hold tx to avoid dropping the sender.\n _status_tx: watch::Sender,\n _num_remaining_tasks_tx: watch::Sender,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct FlowLiveUpdaterOptions {\n /// If true, the updater will keep refreshing the index.\n /// Otherwise, it will only apply changes from the source up to the current time.\n pub live_mode: bool,\n\n /// If true, stats will be printed to the console.\n pub print_stats: bool,\n}\n\nconst REPORT_INTERVAL: std::time::Duration = std::time::Duration::from_secs(10);\n\nstruct SharedAckFn Result<()>> {\n count: usize,\n ack_fn: Option,\n}\n\nimpl Result<()>> SharedAckFn {\n fn new(count: usize, ack_fn: AckAsyncFn) -> Self {\n Self {\n count,\n ack_fn: Some(ack_fn),\n }\n }\n\n async fn ack(v: &Mutex) -> Result<()> {\n let ack_fn = {\n let mut v = v.lock().unwrap();\n v.count -= 1;\n if v.count > 0 { None } else { v.ack_fn.take() }\n };\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n Ok(())\n }\n}\n\nstruct SourceUpdateTask {\n source_idx: usize,\n\n flow: Arc,\n plan: Arc,\n execution_ctx: Arc>,\n source_update_stats: Arc,\n pool: PgPool,\n options: FlowLiveUpdaterOptions,\n\n status_tx: watch::Sender,\n num_remaining_tasks_tx: watch::Sender,\n}\n\nimpl Drop for SourceUpdateTask {\n fn drop(&mut self) {\n self.status_tx.send_modify(|update| {\n update.active_source_idx.remove(&self.source_idx);\n });\n self.num_remaining_tasks_tx.send_modify(|update| {\n *update -= 1;\n });\n }\n}\n\nimpl SourceUpdateTask {\n async fn run(self) -> Result<()> 
{\n let source_idx = self.source_idx;\n let source_context = self\n .execution_ctx\n .get_source_indexing_context(&self.flow, source_idx, &self.pool)\n .await?;\n\n let import_op = &self.plan.import_ops[source_idx];\n\n let report_stats = |stats: &stats::UpdateStats, kind: &str| {\n self.source_update_stats.merge(stats);\n if self.options.print_stats {\n println!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n } else {\n trace!(\n \"{}.{} ({kind}): {}\",\n self.flow.flow_instance.name, import_op.name, stats\n );\n }\n };\n\n let mut futs: Vec>> = Vec::new();\n\n // Deal with change streams.\n if self.options.live_mode {\n if let Some(change_stream) = import_op.executor.change_stream().await? {\n let change_stream_stats = Arc::new(stats::UpdateStats::default());\n futs.push(\n {\n let change_stream_stats = change_stream_stats.clone();\n let pool = self.pool.clone();\n let status_tx = self.status_tx.clone();\n async move {\n let mut change_stream = change_stream;\n let retry_options = retryable::RetryOptions {\n max_retries: None,\n initial_backoff: std::time::Duration::from_secs(5),\n max_backoff: std::time::Duration::from_secs(60),\n };\n loop {\n // Workaround as AsyncFnMut isn't mature yet.\n // Should be changed to use AsyncFnMut once it is.\n let change_stream = tokio::sync::Mutex::new(&mut change_stream);\n let change_msg = retryable::run(\n || async {\n let mut change_stream = change_stream.lock().await;\n change_stream\n .next()\n .await\n .transpose()\n .map_err(retryable::Error::always_retryable)\n },\n &retry_options,\n )\n .await?;\n let change_msg = if let Some(change_msg) = change_msg {\n change_msg\n } else {\n break;\n };\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n let ack_fn = {\n let status_tx = status_tx.clone();\n let update_stats = update_stats.clone();\n let change_stream_stats = change_stream_stats.clone();\n async move || {\n if update_stats.has_any_change() {\n 
status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n change_stream_stats.merge(&update_stats);\n }\n if let Some(ack_fn) = change_msg.ack_fn {\n ack_fn().await\n } else {\n Ok(())\n }\n }\n };\n let shared_ack_fn = Arc::new(Mutex::new(SharedAckFn::new(\n change_msg.changes.iter().len(),\n ack_fn,\n )));\n for change in change_msg.changes {\n let shared_ack_fn = shared_ack_fn.clone();\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n tokio::spawn(source_context.clone().process_source_key(\n change.key,\n change.data,\n update_stats.clone(),\n concur_permit,\n Some(move || async move {\n SharedAckFn::ack(&shared_ack_fn).await\n }),\n pool.clone(),\n ));\n }\n }\n Ok(())\n }\n }\n .boxed(),\n );\n\n futs.push(\n async move {\n let mut interval = tokio::time::interval(REPORT_INTERVAL);\n let mut last_change_stream_stats: UpdateStats =\n change_stream_stats.as_ref().clone();\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n let curr_change_stream_stats = change_stream_stats.as_ref().clone();\n let delta = curr_change_stream_stats.delta(&last_change_stream_stats);\n if delta.has_any_change() {\n report_stats(&delta, \"change stream\");\n last_change_stream_stats = curr_change_stream_stats;\n }\n }\n }\n .boxed(),\n );\n }\n }\n\n // The main update loop.\n futs.push({\n let status_tx = self.status_tx.clone();\n let pool = self.pool.clone();\n let live_mode = self.options.live_mode;\n async move {\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"batch update\");\n\n if let (true, Some(refresh_interval)) =\n (live_mode, import_op.refresh_options.refresh_interval)\n {\n let mut 
interval = tokio::time::interval(refresh_interval);\n interval.set_missed_tick_behavior(MissedTickBehavior::Delay);\n interval.tick().await;\n loop {\n interval.tick().await;\n\n let update_stats = Arc::new(stats::UpdateStats::default());\n source_context.update(&pool, &update_stats).await?;\n if update_stats.has_any_change() {\n status_tx.send_modify(|update| {\n update.source_updates_num[source_idx] += 1;\n });\n }\n report_stats(&update_stats, \"interval refresh\");\n }\n }\n Ok(())\n }\n .boxed()\n });\n\n let join_result = try_join_all(futs).await;\n if let Err(err) = join_result {\n error!(\"Error in source `{}`: {:?}\", import_op.name, err);\n return Err(err);\n }\n Ok(())\n }\n}\n\nimpl FlowLiveUpdater {\n pub async fn start(\n flow_ctx: Arc,\n pool: &PgPool,\n options: FlowLiveUpdaterOptions,\n ) -> Result {\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let execution_ctx = Arc::new(flow_ctx.use_owned_execution_ctx().await?);\n\n let (status_tx, status_rx) = watch::channel(FlowLiveUpdaterStatus {\n active_source_idx: BTreeSet::from_iter(0..plan.import_ops.len()),\n source_updates_num: vec![0; plan.import_ops.len()],\n });\n\n let (num_remaining_tasks_tx, num_remaining_tasks_rx) =\n watch::channel(plan.import_ops.len());\n\n let mut join_set = JoinSet::new();\n let mut stats_per_task = Vec::new();\n\n for source_idx in 0..plan.import_ops.len() {\n let source_update_stats = Arc::new(stats::UpdateStats::default());\n let source_update_task = SourceUpdateTask {\n source_idx,\n flow: flow_ctx.flow.clone(),\n plan: plan.clone(),\n execution_ctx: execution_ctx.clone(),\n source_update_stats: source_update_stats.clone(),\n pool: pool.clone(),\n options: options.clone(),\n status_tx: status_tx.clone(),\n num_remaining_tasks_tx: num_remaining_tasks_tx.clone(),\n };\n join_set.spawn(source_update_task.run());\n stats_per_task.push(source_update_stats);\n }\n Ok(Self {\n flow_ctx,\n join_set: Mutex::new(Some(join_set)),\n stats_per_task,\n recv_state: 
tokio::sync::Mutex::new(UpdateReceiveState {\n status_rx,\n last_num_source_updates: vec![0; plan.import_ops.len()],\n is_done: false,\n }),\n num_remaining_tasks_rx,\n\n _status_tx: status_tx,\n _num_remaining_tasks_tx: num_remaining_tasks_tx,\n })\n }\n\n pub async fn wait(&self) -> Result<()> {\n {\n let mut rx = self.num_remaining_tasks_rx.clone();\n rx.wait_for(|v| *v == 0).await?;\n }\n\n let Some(mut join_set) = self.join_set.lock().unwrap().take() else {\n return Ok(());\n };\n while let Some(task_result) = join_set.join_next().await {\n match task_result {\n Ok(Ok(_)) => {}\n Ok(Err(err)) => {\n return Err(err);\n }\n Err(err) if err.is_cancelled() => {}\n Err(err) => {\n return Err(err.into());\n }\n }\n }\n Ok(())\n }\n\n pub fn abort(&self) {\n let mut join_set = self.join_set.lock().unwrap();\n if let Some(join_set) = &mut *join_set {\n join_set.abort_all();\n }\n }\n\n pub fn index_update_info(&self) -> stats::IndexUpdateInfo {\n stats::IndexUpdateInfo {\n sources: std::iter::zip(\n self.flow_ctx.flow.flow_instance.import_ops.iter(),\n self.stats_per_task.iter(),\n )\n .map(|(import_op, stats)| stats::SourceUpdateInfo {\n source_name: import_op.name.clone(),\n stats: stats.as_ref().clone(),\n })\n .collect(),\n }\n }\n\n pub async fn next_status_updates(&self) -> Result {\n let mut recv_state = self.recv_state.lock().await;\n let recv_state = &mut *recv_state;\n\n if recv_state.is_done {\n return Ok(FlowLiveUpdaterUpdates {\n active_sources: vec![],\n updated_sources: vec![],\n });\n }\n\n recv_state.status_rx.changed().await?;\n let status = recv_state.status_rx.borrow_and_update();\n let updates = FlowLiveUpdaterUpdates {\n active_sources: status\n .active_source_idx\n .iter()\n .map(|idx| {\n self.flow_ctx.flow.flow_instance.import_ops[*idx]\n .name\n .clone()\n })\n .collect(),\n updated_sources: status\n .source_updates_num\n .iter()\n .enumerate()\n .filter_map(|(idx, num_updates)| {\n if num_updates > &recv_state.last_num_source_updates[idx] 
{\n Some(\n self.flow_ctx.flow.flow_instance.import_ops[idx]\n .name\n .clone(),\n )\n } else {\n None\n }\n })\n .collect(),\n };\n recv_state.last_num_source_updates = status.source_updates_num.clone();\n if status.active_source_idx.is_empty() {\n recv_state.is_done = true;\n }\n Ok(updates)\n }\n}\n"], ["/cocoindex/src/ops/functions/split_recursively.rs", "use anyhow::anyhow;\nuse log::{error, trace};\nuse regex::{Matches, Regex};\nuse std::collections::HashSet;\nuse std::sync::LazyLock;\nuse std::{collections::HashMap, sync::Arc};\nuse unicase::UniCase;\n\nuse crate::base::field_attrs;\nuse crate::ops::registry::ExecutorFactoryRegistry;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Deserialize)]\nstruct CustomLanguageSpec {\n language_name: String,\n #[serde(default)]\n aliases: Vec,\n separators_regex: Vec,\n}\n\n#[derive(Deserialize)]\nstruct Spec {\n #[serde(default)]\n custom_languages: Vec,\n}\n\nconst SYNTAX_LEVEL_GAP_COST: usize = 512;\nconst MISSING_OVERLAP_COST: usize = 512;\nconst PER_LINE_BREAK_LEVEL_GAP_COST: usize = 64;\nconst TOO_SMALL_CHUNK_COST: usize = 1048576;\n\npub struct Args {\n text: ResolvedOpArg,\n chunk_size: ResolvedOpArg,\n min_chunk_size: Option,\n chunk_overlap: Option,\n language: Option,\n}\n\nstruct SimpleLanguageConfig {\n name: String,\n aliases: Vec,\n separator_regex: Vec,\n}\n\nstatic DEFAULT_LANGUAGE_CONFIG: LazyLock =\n LazyLock::new(|| SimpleLanguageConfig {\n name: \"_DEFAULT\".to_string(),\n aliases: vec![],\n separator_regex: [r\"\\n\\n+\", r\"\\n\", r\"\\s+\"]\n .into_iter()\n .map(|s| Regex::new(s).unwrap())\n .collect(),\n });\n\nstruct TreesitterLanguageConfig {\n name: String,\n tree_sitter_lang: tree_sitter::Language,\n terminal_node_kind_ids: HashSet,\n}\n\nfn add_treesitter_language<'a>(\n output: &'a mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n lang_fn: impl Into,\n terminal_node_kinds: impl IntoIterator,\n) {\n let tree_sitter_lang: tree_sitter::Language = lang_fn.into();\n 
let terminal_node_kind_ids = terminal_node_kinds\n .into_iter()\n .filter_map(|kind| {\n let id = tree_sitter_lang.id_for_node_kind(kind, true);\n if id != 0 {\n trace!(\"Got id for node kind: `{kind}` -> {id}\");\n Some(id)\n } else {\n error!(\"Failed in getting id for node kind: `{kind}`\");\n None\n }\n })\n .collect();\n\n let config = Arc::new(TreesitterLanguageConfig {\n name: name.to_string(),\n tree_sitter_lang,\n terminal_node_kind_ids,\n });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nstatic TREE_SITTER_LANGUAGE_BY_LANG: LazyLock<\n HashMap, Arc>,\n> = LazyLock::new(|| {\n let mut map = HashMap::new();\n add_treesitter_language(&mut map, \"C\", [\".c\"], tree_sitter_c::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"C++\",\n [\".cpp\", \".cc\", \".cxx\", \".h\", \".hpp\", \"cpp\"],\n tree_sitter_c::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"C#\",\n [\".cs\", \"cs\", \"csharp\"],\n tree_sitter_c_sharp::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"CSS\",\n [\".css\", \".scss\"],\n tree_sitter_css::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Fortran\",\n [\".f\", \".f90\", \".f95\", \".f03\", \"f\", \"f90\", \"f95\", \"f03\"],\n tree_sitter_fortran::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Go\",\n [\".go\", \"golang\"],\n tree_sitter_go::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"HTML\",\n [\".html\", \".htm\"],\n tree_sitter_html::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"Java\", [\".java\"], tree_sitter_java::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"JavaScript\",\n [\".js\", \"js\"],\n tree_sitter_javascript::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"JSON\", [\".json\"], tree_sitter_json::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n 
\"Kotlin\",\n [\".kt\", \".kts\"],\n tree_sitter_kotlin_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Markdown\",\n [\".md\", \".mdx\", \"md\"],\n tree_sitter_md::LANGUAGE,\n [\"inline\"],\n );\n add_treesitter_language(\n &mut map,\n \"Pascal\",\n [\".pas\", \"pas\", \".dpr\", \"dpr\", \"Delphi\"],\n tree_sitter_pascal::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"PHP\", [\".php\"], tree_sitter_php::LANGUAGE_PHP, []);\n add_treesitter_language(\n &mut map,\n \"Python\",\n [\".py\"],\n tree_sitter_python::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"R\", [\".r\"], tree_sitter_r::LANGUAGE, []);\n add_treesitter_language(&mut map, \"Ruby\", [\".rb\"], tree_sitter_ruby::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Rust\",\n [\".rs\", \"rs\"],\n tree_sitter_rust::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"Scala\",\n [\".scala\"],\n tree_sitter_scala::LANGUAGE,\n [],\n );\n add_treesitter_language(&mut map, \"SQL\", [\".sql\"], tree_sitter_sequel::LANGUAGE, []);\n add_treesitter_language(\n &mut map,\n \"Swift\",\n [\".swift\"],\n tree_sitter_swift::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TOML\",\n [\".toml\"],\n tree_sitter_toml_ng::LANGUAGE,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TSX\",\n [\".tsx\"],\n tree_sitter_typescript::LANGUAGE_TSX,\n [],\n );\n add_treesitter_language(\n &mut map,\n \"TypeScript\",\n [\".ts\", \"ts\"],\n tree_sitter_typescript::LANGUAGE_TYPESCRIPT,\n [],\n );\n add_treesitter_language(&mut map, \"XML\", [\".xml\"], tree_sitter_xml::LANGUAGE_XML, []);\n add_treesitter_language(&mut map, \"DTD\", [\".dtd\"], tree_sitter_xml::LANGUAGE_DTD, []);\n add_treesitter_language(\n &mut map,\n \"YAML\",\n [\".yaml\", \".yml\"],\n tree_sitter_yaml::LANGUAGE,\n [],\n );\n map\n});\n\nenum ChunkKind<'t> {\n TreeSitterNode {\n lang_config: &'t TreesitterLanguageConfig,\n node: tree_sitter::Node<'t>,\n },\n RegexpSepChunk {\n lang_config: 
&'t SimpleLanguageConfig,\n next_regexp_sep_id: usize,\n },\n}\n\nstruct Chunk<'t, 's: 't> {\n full_text: &'s str,\n range: RangeValue,\n kind: ChunkKind<'t>,\n}\n\nimpl<'t, 's: 't> Chunk<'t, 's> {\n fn text(&self) -> &'s str {\n self.range.extract_str(self.full_text)\n }\n}\n\nstruct TextChunksIter<'t, 's: 't> {\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n matches_iter: Matches<'t, 's>,\n regexp_sep_id: usize,\n next_start_pos: Option,\n}\n\nimpl<'t, 's: 't> TextChunksIter<'t, 's> {\n fn new(\n lang_config: &'t SimpleLanguageConfig,\n parent: &'t Chunk<'t, 's>,\n regexp_sep_id: usize,\n ) -> Self {\n Self {\n lang_config,\n parent,\n matches_iter: lang_config.separator_regex[regexp_sep_id].find_iter(parent.text()),\n regexp_sep_id,\n next_start_pos: Some(parent.range.start),\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TextChunksIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let start_pos = self.next_start_pos?;\n let end_pos = match self.matches_iter.next() {\n Some(grp) => {\n self.next_start_pos = Some(self.parent.range.start + grp.end());\n self.parent.range.start + grp.start()\n }\n None => {\n self.next_start_pos = None;\n if start_pos >= self.parent.range.end {\n return None;\n }\n self.parent.range.end\n }\n };\n Some(Chunk {\n full_text: self.parent.full_text,\n range: RangeValue::new(start_pos, end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: self.lang_config,\n next_regexp_sep_id: self.regexp_sep_id + 1,\n },\n })\n }\n}\n\nstruct TreeSitterNodeIter<'t, 's: 't> {\n lang_config: &'t TreesitterLanguageConfig,\n full_text: &'s str,\n cursor: Option>,\n next_start_pos: usize,\n end_pos: usize,\n}\n\nimpl<'t, 's: 't> TreeSitterNodeIter<'t, 's> {\n fn fill_gap(\n next_start_pos: &mut usize,\n gap_end_pos: usize,\n full_text: &'s str,\n ) -> Option> {\n let start_pos = *next_start_pos;\n if start_pos < gap_end_pos {\n *next_start_pos = gap_end_pos;\n Some(Chunk {\n full_text,\n range: 
RangeValue::new(start_pos, gap_end_pos),\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n })\n } else {\n None\n }\n }\n}\n\nimpl<'t, 's: 't> Iterator for TreeSitterNodeIter<'t, 's> {\n type Item = Chunk<'t, 's>;\n\n fn next(&mut self) -> Option {\n let cursor = if let Some(cursor) = &mut self.cursor {\n cursor\n } else {\n return Self::fill_gap(&mut self.next_start_pos, self.end_pos, self.full_text);\n };\n let node = cursor.node();\n if let Some(gap) =\n Self::fill_gap(&mut self.next_start_pos, node.start_byte(), self.full_text)\n {\n return Some(gap);\n }\n if !cursor.goto_next_sibling() {\n self.cursor = None;\n }\n self.next_start_pos = node.end_byte();\n Some(Chunk {\n full_text: self.full_text,\n range: RangeValue::new(node.start_byte(), node.end_byte()),\n kind: ChunkKind::TreeSitterNode {\n lang_config: self.lang_config,\n node,\n },\n })\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]\nenum LineBreakLevel {\n Inline,\n Newline,\n DoubleNewline,\n}\n\nimpl LineBreakLevel {\n fn ord(self) -> usize {\n match self {\n LineBreakLevel::Inline => 0,\n LineBreakLevel::Newline => 1,\n LineBreakLevel::DoubleNewline => 2,\n }\n }\n}\n\nfn line_break_level(c: &str) -> LineBreakLevel {\n let mut lb_level = LineBreakLevel::Inline;\n let mut iter = c.chars();\n while let Some(c) = iter.next() {\n if c == '\\n' || c == '\\r' {\n lb_level = LineBreakLevel::Newline;\n for c2 in iter.by_ref() {\n if c2 == '\\n' || c2 == '\\r' {\n if c == c2 {\n return LineBreakLevel::DoubleNewline;\n }\n } else {\n break;\n }\n }\n }\n }\n lb_level\n}\n\nconst INLINE_SPACE_CHARS: [char; 2] = [' ', '\\t'];\n\nstruct AtomChunk {\n range: RangeValue,\n boundary_syntax_level: usize,\n\n internal_lb_level: LineBreakLevel,\n boundary_lb_level: LineBreakLevel,\n}\n\nstruct AtomChunksCollector<'s> {\n full_text: &'s str,\n\n curr_level: usize,\n min_level: usize,\n atom_chunks: Vec,\n}\nimpl<'s> 
AtomChunksCollector<'s> {\n fn collect(&mut self, range: RangeValue) {\n // Trim trailing whitespaces.\n let end_trimmed_text = &self.full_text[range.start..range.end].trim_end();\n if end_trimmed_text.is_empty() {\n return;\n }\n\n // Trim leading whitespaces.\n let trimmed_text = end_trimmed_text.trim_start();\n let new_start = range.start + (end_trimmed_text.len() - trimmed_text.len());\n let new_end = new_start + trimmed_text.len();\n\n // Align to beginning of the line if possible.\n let prev_end = self.atom_chunks.last().map_or(0, |chunk| chunk.range.end);\n let gap = &self.full_text[prev_end..new_start];\n let boundary_lb_level = line_break_level(gap);\n let range = if boundary_lb_level != LineBreakLevel::Inline {\n let trimmed_gap = gap.trim_end_matches(INLINE_SPACE_CHARS);\n RangeValue::new(prev_end + trimmed_gap.len(), new_end)\n } else {\n RangeValue::new(new_start, new_end)\n };\n\n self.atom_chunks.push(AtomChunk {\n range,\n boundary_syntax_level: self.min_level,\n internal_lb_level: line_break_level(trimmed_text),\n boundary_lb_level,\n });\n self.min_level = self.curr_level;\n }\n\n fn into_atom_chunks(mut self) -> Vec {\n self.atom_chunks.push(AtomChunk {\n range: RangeValue::new(self.full_text.len(), self.full_text.len()),\n boundary_syntax_level: self.min_level,\n internal_lb_level: LineBreakLevel::Inline,\n boundary_lb_level: LineBreakLevel::DoubleNewline,\n });\n self.atom_chunks\n }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct OutputPosition {\n char_offset: usize,\n line: u32,\n column: u32,\n}\n\nimpl OutputPosition {\n fn into_output(self) -> value::Value {\n value::Value::Struct(fields_value!(\n self.char_offset as i64,\n self.line as i64,\n self.column as i64\n ))\n }\n}\nstruct Position {\n byte_offset: usize,\n output: Option,\n}\n\nimpl Position {\n fn new(byte_offset: usize) -> Self {\n Self {\n byte_offset,\n output: None,\n }\n }\n}\n\nstruct ChunkOutput<'s> {\n start_pos: Position,\n end_pos: Position,\n text: &'s 
str,\n}\n\nstruct RecursiveChunker<'s> {\n full_text: &'s str,\n chunk_size: usize,\n chunk_overlap: usize,\n min_chunk_size: usize,\n}\n\nimpl<'t, 's: 't> RecursiveChunker<'s> {\n fn collect_atom_chunks_from_iter(\n &self,\n sub_chunks_iter: impl Iterator>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n atom_collector.curr_level += 1;\n for sub_chunk in sub_chunks_iter {\n let range = sub_chunk.range;\n if range.len() <= self.min_chunk_size {\n atom_collector.collect(range);\n } else {\n self.collect_atom_chunks(sub_chunk, atom_collector)?;\n }\n }\n atom_collector.curr_level -= 1;\n if atom_collector.curr_level < atom_collector.min_level {\n atom_collector.min_level = atom_collector.curr_level;\n }\n Ok(())\n }\n\n fn collect_atom_chunks(\n &self,\n chunk: Chunk<'t, 's>,\n atom_collector: &mut AtomChunksCollector<'s>,\n ) -> Result<()> {\n match chunk.kind {\n ChunkKind::TreeSitterNode { lang_config, node } => {\n if !lang_config.terminal_node_kind_ids.contains(&node.kind_id()) {\n let mut cursor = node.walk();\n if cursor.goto_first_child() {\n return self.collect_atom_chunks_from_iter(\n TreeSitterNodeIter {\n lang_config,\n full_text: self.full_text,\n cursor: Some(cursor),\n next_start_pos: node.start_byte(),\n end_pos: node.end_byte(),\n },\n atom_collector,\n );\n }\n }\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: chunk.range,\n kind: ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n },\n },\n atom_collector,\n )\n }\n ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id,\n } => {\n if next_regexp_sep_id >= lang_config.separator_regex.len() {\n atom_collector.collect(chunk.range);\n Ok(())\n } else {\n self.collect_atom_chunks_from_iter(\n TextChunksIter::new(lang_config, &chunk, next_regexp_sep_id),\n atom_collector,\n )\n }\n }\n }\n }\n\n fn get_overlap_cost_base(&self, offset: usize) -> usize {\n if self.chunk_overlap == 0 {\n 0\n } else {\n 
(self.full_text.len() - offset) * MISSING_OVERLAP_COST / self.chunk_overlap\n }\n }\n\n fn merge_atom_chunks(&self, atom_chunks: Vec) -> Vec> {\n struct AtomRoutingPlan {\n start_idx: usize, // index of `atom_chunks` for the start chunk\n prev_plan_idx: usize, // index of `plans` for the previous plan\n cost: usize,\n overlap_cost_base: usize,\n }\n type PrevPlanCandidate = (std::cmp::Reverse, usize); // (cost, start_idx)\n\n let mut plans = Vec::with_capacity(atom_chunks.len());\n // Janitor\n plans.push(AtomRoutingPlan {\n start_idx: 0,\n prev_plan_idx: 0,\n cost: 0,\n overlap_cost_base: self.get_overlap_cost_base(0),\n });\n let mut prev_plan_candidates = std::collections::BinaryHeap::::new();\n\n let mut gap_cost_cache = vec![0];\n let mut syntax_level_gap_cost = |boundary: usize, internal: usize| -> usize {\n if boundary > internal {\n let gap = boundary - internal;\n for i in gap_cost_cache.len()..=gap {\n gap_cost_cache.push(gap_cost_cache[i - 1] + SYNTAX_LEVEL_GAP_COST / i);\n }\n gap_cost_cache[gap]\n } else {\n 0\n }\n };\n\n for (i, chunk) in atom_chunks[0..atom_chunks.len() - 1].iter().enumerate() {\n let mut min_cost = usize::MAX;\n let mut arg_min_start_idx: usize = 0;\n let mut arg_min_prev_plan_idx: usize = 0;\n let mut start_idx = i;\n\n let end_syntax_level = atom_chunks[i + 1].boundary_syntax_level;\n let end_lb_level = atom_chunks[i + 1].boundary_lb_level;\n\n let mut internal_syntax_level = usize::MAX;\n let mut internal_lb_level = LineBreakLevel::Inline;\n\n fn lb_level_gap(boundary: LineBreakLevel, internal: LineBreakLevel) -> usize {\n if boundary.ord() < internal.ord() {\n internal.ord() - boundary.ord()\n } else {\n 0\n }\n }\n loop {\n let start_chunk = &atom_chunks[start_idx];\n let chunk_size = chunk.range.end - start_chunk.range.start;\n\n let mut cost = 0;\n cost +=\n syntax_level_gap_cost(start_chunk.boundary_syntax_level, internal_syntax_level);\n cost += syntax_level_gap_cost(end_syntax_level, internal_syntax_level);\n cost += 
(lb_level_gap(start_chunk.boundary_lb_level, internal_lb_level)\n + lb_level_gap(end_lb_level, internal_lb_level))\n * PER_LINE_BREAK_LEVEL_GAP_COST;\n if chunk_size < self.min_chunk_size {\n cost += TOO_SMALL_CHUNK_COST;\n }\n\n if chunk_size > self.chunk_size {\n if min_cost == usize::MAX {\n min_cost = cost + plans[start_idx].cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = start_idx;\n }\n break;\n }\n\n let prev_plan_idx = if self.chunk_overlap > 0 {\n while let Some(top_prev_plan) = prev_plan_candidates.peek() {\n let overlap_size =\n atom_chunks[top_prev_plan.1].range.end - start_chunk.range.start;\n if overlap_size <= self.chunk_overlap {\n break;\n }\n prev_plan_candidates.pop();\n }\n prev_plan_candidates.push((\n std::cmp::Reverse(\n plans[start_idx].cost + plans[start_idx].overlap_cost_base,\n ),\n start_idx,\n ));\n prev_plan_candidates.peek().unwrap().1\n } else {\n start_idx\n };\n let prev_plan = &plans[prev_plan_idx];\n cost += prev_plan.cost;\n if self.chunk_overlap == 0 {\n cost += MISSING_OVERLAP_COST / 2;\n } else {\n let start_cost_base = self.get_overlap_cost_base(start_chunk.range.start);\n cost += if prev_plan.overlap_cost_base < start_cost_base {\n MISSING_OVERLAP_COST + prev_plan.overlap_cost_base - start_cost_base\n } else {\n MISSING_OVERLAP_COST\n };\n }\n if cost < min_cost {\n min_cost = cost;\n arg_min_start_idx = start_idx;\n arg_min_prev_plan_idx = prev_plan_idx;\n }\n\n if start_idx == 0 {\n break;\n }\n\n start_idx -= 1;\n internal_syntax_level =\n internal_syntax_level.min(start_chunk.boundary_syntax_level);\n internal_lb_level = internal_lb_level.max(start_chunk.internal_lb_level);\n }\n plans.push(AtomRoutingPlan {\n start_idx: arg_min_start_idx,\n prev_plan_idx: arg_min_prev_plan_idx,\n cost: min_cost,\n overlap_cost_base: self.get_overlap_cost_base(chunk.range.end),\n });\n prev_plan_candidates.clear();\n }\n\n let mut output = Vec::new();\n let mut plan_idx = plans.len() - 1;\n while plan_idx > 0 {\n let 
plan = &plans[plan_idx];\n let start_chunk = &atom_chunks[plan.start_idx];\n let end_chunk = &atom_chunks[plan_idx - 1];\n output.push(ChunkOutput {\n start_pos: Position::new(start_chunk.range.start),\n end_pos: Position::new(end_chunk.range.end),\n text: &self.full_text[start_chunk.range.start..end_chunk.range.end],\n });\n plan_idx = plan.prev_plan_idx;\n }\n output.reverse();\n output\n }\n\n fn split_root_chunk(&self, kind: ChunkKind<'t>) -> Result>> {\n let mut atom_collector = AtomChunksCollector {\n full_text: self.full_text,\n min_level: 0,\n curr_level: 0,\n atom_chunks: Vec::new(),\n };\n self.collect_atom_chunks(\n Chunk {\n full_text: self.full_text,\n range: RangeValue::new(0, self.full_text.len()),\n kind,\n },\n &mut atom_collector,\n )?;\n let atom_chunks = atom_collector.into_atom_chunks();\n let output = self.merge_atom_chunks(atom_chunks);\n Ok(output)\n }\n}\n\nstruct Executor {\n args: Args,\n custom_languages: HashMap, Arc>,\n}\n\nimpl Executor {\n fn new(args: Args, spec: Spec) -> Result {\n let mut custom_languages = HashMap::new();\n for lang in spec.custom_languages {\n let separator_regex = lang\n .separators_regex\n .iter()\n .map(|s| Regex::new(s))\n .collect::>()\n .with_context(|| {\n format!(\n \"failed in parsing regexp for language `{}`\",\n lang.language_name\n )\n })?;\n let language_config = Arc::new(SimpleLanguageConfig {\n name: lang.language_name,\n aliases: lang.aliases,\n separator_regex,\n });\n if custom_languages\n .insert(\n UniCase::new(language_config.name.clone()),\n language_config.clone(),\n )\n .is_some()\n {\n api_bail!(\n \"duplicate language name / alias: `{}`\",\n language_config.name\n );\n }\n for alias in &language_config.aliases {\n if custom_languages\n .insert(UniCase::new(alias.clone()), language_config.clone())\n .is_some()\n {\n api_bail!(\"duplicate language name / alias: `{}`\", alias);\n }\n }\n }\n Ok(Self {\n args,\n custom_languages,\n })\n }\n}\n\nfn set_output_positions<'a>(text: &str, 
positions: impl Iterator) {\n let mut positions = positions.collect::>();\n positions.sort_by_key(|o| o.byte_offset);\n\n let mut positions_iter = positions.iter_mut();\n let Some(mut next_position) = positions_iter.next() else {\n return;\n };\n\n let mut char_offset = 0;\n let mut line = 1;\n let mut column = 1;\n for (byte_offset, ch) in text.char_indices() {\n while next_position.byte_offset == byte_offset {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n char_offset += 1;\n if ch == '\\n' {\n line += 1;\n column = 1;\n } else {\n column += 1;\n }\n }\n\n // Offsets after the last char.\n loop {\n next_position.output = Some(OutputPosition {\n char_offset,\n line,\n column,\n });\n if let Some(position) = positions_iter.next() {\n next_position = position;\n } else {\n return;\n }\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let full_text = self.args.text.value(&input)?.as_str()?;\n let chunk_size = self.args.chunk_size.value(&input)?.as_int64()?;\n let recursive_chunker = RecursiveChunker {\n full_text,\n chunk_size: chunk_size as usize,\n chunk_overlap: (self.args.chunk_overlap.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(0) as usize,\n min_chunk_size: (self.args.min_chunk_size.value(&input)?)\n .optional()\n .map(|v| v.as_int64())\n .transpose()?\n .unwrap_or(chunk_size / 2) as usize,\n };\n\n let language = UniCase::new(\n (if let Some(language) = self.args.language.value(&input)?.optional() {\n language.as_str()?\n } else {\n \"\"\n })\n .to_string(),\n );\n let mut output = if let Some(lang_config) = self.custom_languages.get(&language) {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config,\n next_regexp_sep_id: 0,\n })?\n } else if let Some(lang_config) = 
TREE_SITTER_LANGUAGE_BY_LANG.get(&language) {\n let mut parser = tree_sitter::Parser::new();\n parser.set_language(&lang_config.tree_sitter_lang)?;\n let tree = parser.parse(full_text.as_ref(), None).ok_or_else(|| {\n anyhow!(\"failed in parsing text in language: {}\", lang_config.name)\n })?;\n recursive_chunker.split_root_chunk(ChunkKind::TreeSitterNode {\n lang_config,\n node: tree.root_node(),\n })?\n } else {\n recursive_chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n })?\n };\n\n set_output_positions(\n full_text,\n output.iter_mut().flat_map(|chunk_output| {\n std::iter::once(&mut chunk_output.start_pos)\n .chain(std::iter::once(&mut chunk_output.end_pos))\n }),\n );\n\n let table = output\n .into_iter()\n .map(|chunk_output| {\n let output_start = chunk_output.start_pos.output.unwrap();\n let output_end = chunk_output.end_pos.output.unwrap();\n (\n RangeValue::new(output_start.char_offset, output_end.char_offset).into(),\n fields_value!(\n Arc::::from(chunk_output.text),\n output_start.into_output(),\n output_end.into_output()\n )\n .into(),\n )\n })\n .collect();\n\n Ok(Value::KTable(table))\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"SplitRecursively\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n chunk_size: args_resolver\n .next_arg(\"chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n min_chunk_size: args_resolver\n .next_optional_arg(\"min_chunk_size\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n chunk_overlap: args_resolver\n 
.next_optional_arg(\"chunk_overlap\")?\n .expect_type(&ValueType::Basic(BasicValueType::Int64))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let pos_struct = schema::ValueType::Struct(schema::StructSchema {\n fields: Arc::new(vec![\n schema::FieldSchema::new(\"offset\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"line\", make_output_type(BasicValueType::Int64)),\n schema::FieldSchema::new(\"column\", make_output_type(BasicValueType::Int64)),\n ]),\n description: None,\n });\n\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n schema_builder.add_field(FieldSchema::new(\n \"location\",\n make_output_type(BasicValueType::Range),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"text\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"start\",\n schema::EnrichedValueType {\n typ: pos_struct.clone(),\n nullable: false,\n attrs: Default::default(),\n },\n ));\n schema_builder.add_field(FieldSchema::new(\n \"end\",\n schema::EnrichedValueType {\n typ: pos_struct,\n nullable: false,\n attrs: Default::default(),\n },\n ));\n let output_schema = make_output_type(TableSchema::new(TableKind::KTable, struct_schema))\n .with_attr(\n field_attrs::CHUNK_BASE_TEXT,\n serde_json::to_value(args_resolver.get_analyze_value(&args.text))?,\n );\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(args, spec)?))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n // Helper function to assert chunk text and its consistency with the range within the original 
text.\n fn assert_chunk_text_consistency(\n full_text: &str, // Added full text\n actual_chunk: &ChunkOutput<'_>,\n expected_text: &str,\n context: &str,\n ) {\n // Extract text using the chunk's range from the original full text.\n let extracted_text = full_text\n .get(actual_chunk.start_pos.byte_offset..actual_chunk.end_pos.byte_offset)\n .unwrap();\n // Assert that the expected text matches the text provided in the chunk.\n assert_eq!(\n actual_chunk.text, expected_text,\n \"Provided chunk text mismatch - {context}\"\n );\n // Assert that the expected text also matches the text extracted using the chunk's range.\n assert_eq!(\n extracted_text, expected_text,\n \"Range inconsistency: extracted text mismatch - {context}\"\n );\n }\n\n // Creates a default RecursiveChunker for testing, assuming no language-specific parsing.\n fn create_test_chunker(\n text: &str,\n chunk_size: usize,\n min_chunk_size: usize,\n chunk_overlap: usize,\n ) -> RecursiveChunker {\n RecursiveChunker {\n full_text: text,\n chunk_size,\n chunk_overlap,\n min_chunk_size,\n }\n }\n\n #[tokio::test]\n async fn test_split_recursively() {\n let spec = Spec {\n custom_languages: vec![],\n };\n let factory = Arc::new(Factory);\n let text_content = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n\n let input_args_values = vec![\n text_content.to_string().into(),\n (15i64).into(),\n (5i64).into(),\n (0i64).into(),\n Value::Null,\n ];\n\n let input_arg_schemas = vec![\n build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"min_chunk_size\", BasicValueType::Int64),\n build_arg_schema(\"chunk_overlap\", BasicValueType::Int64),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n 
Value::KTable(table) => {\n let expected_chunks = vec![\n (RangeValue::new(0, 8), \"Linea 1.\"),\n (RangeValue::new(9, 17), \"Linea 2.\"),\n (RangeValue::new(19, 27), \"Linea 3.\"),\n ];\n\n for (range, expected_text) in expected_chunks {\n let key: KeyValue = range.into();\n match table.get(&key) {\n Some(scope_value_ref) => {\n let chunk_text =\n scope_value_ref.0.fields[0].as_str().unwrap_or_else(|_| {\n panic!(\"Chunk text not a string for key {key:?}\")\n });\n assert_eq!(**chunk_text, *expected_text);\n }\n None => panic!(\"Expected row value for key {key:?}, not found\"),\n }\n }\n }\n other => panic!(\"Expected Value::KTable, got {other:?}\"),\n }\n }\n\n #[test]\n fn test_translate_bytes_to_chars_simple() {\n let text = \"abc😄def\";\n let mut start1 = Position::new(0);\n let mut end1 = Position::new(3);\n let mut start2 = Position::new(3);\n let mut end2 = Position::new(7);\n let mut start3 = Position::new(7);\n let mut end3 = Position::new(10);\n let mut end_full = Position::new(text.len());\n\n let offsets = vec![\n &mut start1,\n &mut end1,\n &mut start2,\n &mut end2,\n &mut start3,\n &mut end3,\n &mut end_full,\n ];\n\n set_output_positions(text, offsets.into_iter());\n\n assert_eq!(\n start1.output,\n Some(OutputPosition {\n char_offset: 0,\n line: 1,\n column: 1,\n })\n );\n assert_eq!(\n end1.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n start2.output,\n Some(OutputPosition {\n char_offset: 3,\n line: 1,\n column: 4,\n })\n );\n assert_eq!(\n end2.output,\n Some(OutputPosition {\n char_offset: 4,\n line: 1,\n column: 5,\n })\n );\n assert_eq!(\n end3.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n assert_eq!(\n end_full.output,\n Some(OutputPosition {\n char_offset: 7,\n line: 1,\n column: 8,\n })\n );\n }\n\n #[test]\n fn test_basic_split_no_overlap() {\n let text = \"Linea 1.\\nLinea 2.\\n\\nLinea 3.\";\n let chunker = create_test_chunker(text, 15, 5, 
0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n assert_chunk_text_consistency(text, &chunks[0], \"Linea 1.\", \"Test 1, Chunk 0\");\n assert_chunk_text_consistency(text, &chunks[1], \"Linea 2.\", \"Test 1, Chunk 1\");\n assert_chunk_text_consistency(text, &chunks[2], \"Linea 3.\", \"Test 1, Chunk 2\");\n\n // Test splitting when chunk_size forces breaks within segments.\n let text2 = \"A very very long text that needs to be split.\";\n let chunker2 = create_test_chunker(text2, 20, 12, 0);\n let result2 = chunker2.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result2.is_ok());\n let chunks2 = result2.unwrap();\n\n // Expect multiple chunks, likely split by spaces due to chunk_size.\n assert!(chunks2.len() > 1);\n assert_chunk_text_consistency(text2, &chunks2[0], \"A very very long\", \"Test 2, Chunk 0\");\n assert!(chunks2[0].text.len() <= 20);\n }\n\n #[test]\n fn test_basic_split_with_overlap() {\n let text = \"This is a test text that is a bit longer to see how the overlap works.\";\n let chunker = create_test_chunker(text, 20, 10, 5);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert!(chunks.len() > 1);\n\n if chunks.len() >= 2 {\n assert!(chunks[0].text.len() <= 25);\n }\n }\n\n #[test]\n fn test_split_trims_whitespace() {\n let text = \" \\n First chunk. \\n\\n Second chunk with spaces at the end. 
\\n\";\n let chunker = create_test_chunker(text, 30, 10, 0);\n\n let result = chunker.split_root_chunk(ChunkKind::RegexpSepChunk {\n lang_config: &DEFAULT_LANGUAGE_CONFIG,\n next_regexp_sep_id: 0,\n });\n\n assert!(result.is_ok());\n let chunks = result.unwrap();\n\n assert_eq!(chunks.len(), 3);\n\n assert_chunk_text_consistency(\n text,\n &chunks[0],\n \" First chunk.\",\n \"Whitespace Test, Chunk 0\",\n );\n assert_chunk_text_consistency(\n text,\n &chunks[1],\n \" Second chunk with spaces\",\n \"Whitespace Test, Chunk 1\",\n );\n assert_chunk_text_consistency(text, &chunks[2], \"at the end.\", \"Whitespace Test, Chunk 2\");\n }\n}\n"], ["/cocoindex/src/ops/sources/azure_blob.rs", "use crate::fields_value;\nuse async_stream::try_stream;\nuse azure_core::prelude::NextMarker;\nuse azure_identity::{DefaultAzureCredential, TokenCredentialOptions};\nuse azure_storage::StorageCredentials;\nuse azure_storage_blobs::prelude::*;\nuse futures::StreamExt;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse std::sync::Arc;\n\nuse crate::base::field_attrs;\nuse crate::ops::sdk::*;\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n account_name: String,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n\n /// SAS token for authentication. Takes precedence over account_access_key.\n sas_token: Option>,\n /// Account access key for authentication. 
If not provided, will use default Azure credential.\n account_access_key: Option>,\n}\n\nstruct Executor {\n client: BlobServiceClient,\n container_name: String,\n prefix: Option,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, key: &str) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(key))\n }\n\n fn is_file_included(&self, key: &str) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(key))\n && !self.is_excluded(key)\n }\n}\n\nfn datetime_to_ordinal(dt: &time::OffsetDateTime) -> Ordinal {\n Ordinal(Some(dt.unix_timestamp_nanos() as i64 / 1000))\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n _options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n try_stream! {\n let mut continuation_token: Option = None;\n loop {\n let mut list_builder = self.client\n .container_client(&self.container_name)\n .list_blobs();\n\n if let Some(p) = &self.prefix {\n list_builder = list_builder.prefix(p.clone());\n }\n\n if let Some(token) = continuation_token.take() {\n list_builder = list_builder.marker(token);\n }\n\n let mut page_stream = list_builder.into_stream();\n let Some(page_result) = page_stream.next().await else {\n break;\n };\n\n let page = page_result?;\n let mut batch = Vec::new();\n\n for blob in page.blobs.blobs() {\n let key = &blob.name;\n\n // Only include files (not directories)\n if key.ends_with('/') { continue; }\n\n if self.is_file_included(key) {\n let ordinal = Some(datetime_to_ordinal(&blob.properties.last_modified));\n batch.push(PartialSourceRowMetadata {\n key: KeyValue::Str(key.clone().into()),\n ordinal,\n });\n }\n }\n\n if !batch.is_empty() {\n yield batch;\n }\n\n continuation_token = page.next_marker;\n if continuation_token.is_none() {\n break;\n }\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: 
&SourceExecutorGetOptions,\n ) -> Result {\n let key_str = key.str_value()?;\n if !self.is_file_included(key_str) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n\n let blob_client = self\n .client\n .container_client(&self.container_name)\n .blob_client(key_str.as_ref());\n\n let mut stream = blob_client.get().into_stream();\n let result = stream.next().await;\n\n let blob_response = match result {\n Some(response) => response?,\n None => {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n };\n\n let ordinal = if options.include_ordinal {\n Some(datetime_to_ordinal(\n &blob_response.blob.properties.last_modified,\n ))\n } else {\n None\n };\n\n let value = if options.include_value {\n let bytes = blob_response.data.collect().await?;\n Some(SourceValue::Existence(if self.binary {\n fields_value!(bytes)\n } else {\n fields_value!(String::from_utf8_lossy(&bytes).to_string())\n }))\n } else {\n None\n };\n\n Ok(PartialSourceRowData { value, ordinal })\n }\n\n async fn change_stream(\n &self,\n ) -> Result>>> {\n // Azure Blob Storage doesn't have built-in change notifications like S3+SQS\n Ok(None)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"AzureBlob\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n 
serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n context: Arc,\n ) -> Result> {\n let credential = if let Some(sas_token) = spec.sas_token {\n let sas_token = context.auth_registry.get(&sas_token)?;\n StorageCredentials::sas_token(sas_token)?\n } else if let Some(account_access_key) = spec.account_access_key {\n let account_access_key = context.auth_registry.get(&account_access_key)?;\n StorageCredentials::access_key(spec.account_name.clone(), account_access_key)\n } else {\n let default_credential = Arc::new(DefaultAzureCredential::create(\n TokenCredentialOptions::default(),\n )?);\n StorageCredentials::token_credential(default_credential)\n };\n\n let client = BlobServiceClient::new(&spec.account_name, credential);\n Ok(Box::new(Executor {\n client,\n container_name: spec.container_name,\n prefix: spec.prefix,\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/lib_context.rs", "use crate::prelude::*;\n\nuse crate::builder::AnalyzedFlow;\nuse crate::execution::source_indexer::SourceIndexingContext;\nuse crate::service::error::ApiError;\nuse crate::settings;\nuse crate::setup::ObjectSetupStatus;\nuse axum::http::StatusCode;\nuse sqlx::PgPool;\nuse sqlx::postgres::PgConnectOptions;\nuse tokio::runtime::Runtime;\n\npub struct FlowExecutionContext {\n pub setup_execution_context: Arc,\n pub setup_status: setup::FlowSetupStatus,\n source_indexing_contexts: Vec>>,\n}\n\nasync fn build_setup_context(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: 
Option<&setup::FlowSetupState>,\n) -> Result<(\n Arc,\n setup::FlowSetupStatus,\n)> {\n let setup_execution_context = Arc::new(exec_ctx::build_flow_setup_execution_context(\n &analyzed_flow.flow_instance,\n &analyzed_flow.data_schema,\n &analyzed_flow.setup_state,\n existing_flow_ss,\n )?);\n\n let setup_status = setup::check_flow_setup_status(\n Some(&setup_execution_context.setup_state),\n existing_flow_ss,\n &analyzed_flow.flow_instance_ctx,\n )\n .await?;\n\n Ok((setup_execution_context, setup_status))\n}\n\nimpl FlowExecutionContext {\n async fn new(\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n let mut source_indexing_contexts = Vec::new();\n source_indexing_contexts.resize_with(analyzed_flow.flow_instance.import_ops.len(), || {\n tokio::sync::OnceCell::new()\n });\n\n Ok(Self {\n setup_execution_context,\n setup_status,\n source_indexing_contexts,\n })\n }\n\n pub async fn update_setup_state(\n &mut self,\n analyzed_flow: &AnalyzedFlow,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result<()> {\n let (setup_execution_context, setup_status) =\n build_setup_context(analyzed_flow, existing_flow_ss).await?;\n\n self.setup_execution_context = setup_execution_context;\n self.setup_status = setup_status;\n Ok(())\n }\n\n pub async fn get_source_indexing_context(\n &self,\n flow: &Arc,\n source_idx: usize,\n pool: &PgPool,\n ) -> Result<&Arc> {\n self.source_indexing_contexts[source_idx]\n .get_or_try_init(|| async move {\n anyhow::Ok(Arc::new(\n SourceIndexingContext::load(\n flow.clone(),\n source_idx,\n self.setup_execution_context.clone(),\n pool,\n )\n .await?,\n ))\n })\n .await\n }\n}\n\npub struct FlowContext {\n pub flow: Arc,\n execution_ctx: Arc>,\n}\n\nimpl FlowContext {\n pub fn flow_name(&self) -> &str {\n &self.flow.flow_instance.name\n }\n\n pub async fn new(\n flow: 
Arc,\n existing_flow_ss: Option<&setup::FlowSetupState>,\n ) -> Result {\n let execution_ctx = Arc::new(tokio::sync::RwLock::new(\n FlowExecutionContext::new(&flow, existing_flow_ss).await?,\n ));\n Ok(Self {\n flow,\n execution_ctx,\n })\n }\n\n pub async fn use_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.read().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub async fn use_owned_execution_ctx(\n &self,\n ) -> Result> {\n let execution_ctx = self.execution_ctx.clone().read_owned().await;\n if !execution_ctx.setup_status.is_up_to_date() {\n api_bail!(\n \"Setup for flow `{}` is not up-to-date. Please run `cocoindex setup` to update the setup.\",\n self.flow_name()\n );\n }\n Ok(execution_ctx)\n }\n\n pub fn get_execution_ctx_for_setup(&self) -> &tokio::sync::RwLock {\n &self.execution_ctx\n }\n}\n\nstatic TOKIO_RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap());\nstatic AUTH_REGISTRY: LazyLock> = LazyLock::new(|| Arc::new(AuthRegistry::new()));\n\ntype PoolKey = (String, Option);\ntype PoolValue = Arc>;\n\n#[derive(Default)]\npub struct DbPools {\n pub pools: Mutex>,\n}\n\nimpl DbPools {\n pub async fn get_pool(&self, conn_spec: &settings::DatabaseConnectionSpec) -> Result {\n let db_pool_cell = {\n let key = (conn_spec.url.clone(), conn_spec.user.clone());\n let mut db_pools = self.pools.lock().unwrap();\n db_pools.entry(key).or_default().clone()\n };\n let pool = db_pool_cell\n .get_or_try_init(|| async move {\n let mut pg_options: PgConnectOptions = conn_spec.url.parse()?;\n if let Some(user) = &conn_spec.user {\n pg_options = pg_options.username(user);\n }\n if let Some(password) = &conn_spec.password {\n pg_options = pg_options.password(password);\n }\n let pool = PgPool::connect_with(pg_options)\n .await\n .context(\"Failed to connect to 
database\")?;\n anyhow::Ok(pool)\n })\n .await?;\n Ok(pool.clone())\n }\n}\n\npub struct LibSetupContext {\n pub all_setup_states: setup::AllSetupStates,\n pub global_setup_status: setup::GlobalSetupStatus,\n}\npub struct PersistenceContext {\n pub builtin_db_pool: PgPool,\n pub setup_ctx: tokio::sync::RwLock,\n}\n\npub struct LibContext {\n pub db_pools: DbPools,\n pub persistence_ctx: Option,\n pub flows: Mutex>>,\n\n pub global_concurrency_controller: Arc,\n}\n\nimpl LibContext {\n pub fn get_flow_context(&self, flow_name: &str) -> Result> {\n let flows = self.flows.lock().unwrap();\n let flow_ctx = flows\n .get(flow_name)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"Flow instance not found: {flow_name}\"),\n StatusCode::NOT_FOUND,\n )\n })?\n .clone();\n Ok(flow_ctx)\n }\n\n pub fn remove_flow_context(&self, flow_name: &str) {\n let mut flows = self.flows.lock().unwrap();\n flows.remove(flow_name);\n }\n\n pub fn require_persistence_ctx(&self) -> Result<&PersistenceContext> {\n self.persistence_ctx\n .as_ref()\n .ok_or_else(|| anyhow!(\"Database is required for this operation. 
Please set COCOINDEX_DATABASE_URL environment variable and call cocoindex.init() with database settings.\"))\n }\n\n pub fn require_builtin_db_pool(&self) -> Result<&PgPool> {\n Ok(&self.require_persistence_ctx()?.builtin_db_pool)\n }\n}\n\npub fn get_runtime() -> &'static Runtime {\n &TOKIO_RUNTIME\n}\n\npub fn get_auth_registry() -> &'static Arc {\n &AUTH_REGISTRY\n}\n\nstatic LIB_INIT: OnceLock<()> = OnceLock::new();\npub fn create_lib_context(settings: settings::Settings) -> Result {\n LIB_INIT.get_or_init(|| {\n let _ = env_logger::try_init();\n\n pyo3_async_runtimes::tokio::init_with_runtime(get_runtime()).unwrap();\n\n let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();\n });\n\n let db_pools = DbPools::default();\n let persistence_ctx = if let Some(database_spec) = &settings.database {\n let (pool, all_setup_states) = get_runtime().block_on(async {\n let pool = db_pools.get_pool(database_spec).await?;\n let existing_ss = setup::get_existing_setup_state(&pool).await?;\n anyhow::Ok((pool, existing_ss))\n })?;\n Some(PersistenceContext {\n builtin_db_pool: pool,\n setup_ctx: tokio::sync::RwLock::new(LibSetupContext {\n global_setup_status: setup::GlobalSetupStatus::from_setup_states(&all_setup_states),\n all_setup_states,\n }),\n })\n } else {\n // No database configured\n None\n };\n\n Ok(LibContext {\n db_pools,\n persistence_ctx,\n flows: Mutex::new(BTreeMap::new()),\n global_concurrency_controller: Arc::new(concur_control::ConcurrencyController::new(\n &concur_control::Options {\n max_inflight_rows: settings.global_execution_options.source_max_inflight_rows,\n max_inflight_bytes: settings.global_execution_options.source_max_inflight_bytes,\n },\n )),\n })\n}\n\npub static LIB_CONTEXT: RwLock>> = RwLock::new(None);\n\npub(crate) fn init_lib_context(settings: settings::Settings) -> Result<()> {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = Some(Arc::new(create_lib_context(settings)?));\n 
Ok(())\n}\n\npub(crate) fn get_lib_context() -> Result> {\n let lib_context_locked = LIB_CONTEXT.read().unwrap();\n lib_context_locked\n .as_ref()\n .cloned()\n .ok_or_else(|| anyhow!(\"CocoIndex library is not initialized or already stopped\"))\n}\n\npub(crate) fn clear_lib_context() {\n let mut lib_context_locked = LIB_CONTEXT.write().unwrap();\n *lib_context_locked = None;\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_db_pools_default() {\n let db_pools = DbPools::default();\n assert!(db_pools.pools.lock().unwrap().is_empty());\n }\n\n #[test]\n fn test_lib_context_without_database() {\n let lib_context = create_lib_context(settings::Settings::default()).unwrap();\n assert!(lib_context.persistence_ctx.is_none());\n assert!(lib_context.require_builtin_db_pool().is_err());\n }\n\n #[test]\n fn test_persistence_context_type_safety() {\n // This test ensures that PersistenceContext groups related fields together\n let settings = settings::Settings {\n database: Some(settings::DatabaseConnectionSpec {\n url: \"postgresql://test\".to_string(),\n user: None,\n password: None,\n }),\n ..Default::default()\n };\n\n // This would fail at runtime due to invalid connection, but we're testing the structure\n let result = create_lib_context(settings);\n // We expect this to fail due to invalid connection, but the structure should be correct\n assert!(result.is_err());\n }\n}\n"], ["/cocoindex/src/ops/functions/extract_by_llm.rs", "use crate::llm::{\n LlmGenerateRequest, LlmGenerationClient, LlmSpec, OutputFormat, new_llm_generation_client,\n};\nuse crate::ops::sdk::*;\nuse crate::prelude::*;\nuse base::json_schema::build_json_schema;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Spec {\n llm_spec: LlmSpec,\n output_type: EnrichedValueType,\n instruction: Option,\n}\n\npub struct Args {\n text: Option,\n image: Option,\n}\n\nstruct Executor {\n args: Args,\n client: Box,\n 
model: String,\n output_json_schema: SchemaObject,\n system_prompt: String,\n value_extractor: base::json_schema::ValueExtractor,\n}\n\nfn get_system_prompt(instructions: &Option, extra_instructions: Option) -> String {\n let mut message =\n \"You are a helpful assistant that processes user-provided inputs (text, images, or both) to produce structured outputs. \\\nYour task is to follow the provided instructions to generate or extract information and output valid JSON matching the specified schema. \\\nBase your response solely on the content of the input. \\\nFor generative tasks, respond accurately and relevantly based on what is provided. \\\nUnless explicitly instructed otherwise, output only the JSON. DO NOT include explanations, descriptions, or formatting outside the JSON.\"\n .to_string();\n\n if let Some(custom_instructions) = instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(custom_instructions);\n }\n\n if let Some(extra_instructions) = extra_instructions {\n message.push_str(\"\\n\\n\");\n message.push_str(&extra_instructions);\n }\n\n message\n}\n\nimpl Executor {\n async fn new(spec: Spec, args: Args) -> Result {\n let client = new_llm_generation_client(\n spec.llm_spec.api_type,\n spec.llm_spec.address,\n spec.llm_spec.api_config,\n )\n .await?;\n let schema_output = build_json_schema(spec.output_type, client.json_schema_options())?;\n Ok(Self {\n args,\n client,\n model: spec.llm_spec.model,\n output_json_schema: schema_output.schema,\n system_prompt: get_system_prompt(&spec.instruction, schema_output.extra_instructions),\n value_extractor: schema_output.value_extractor,\n })\n }\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let image_bytes: Option> = self\n .args\n .image\n .as_ref()\n .map(|arg| arg.value(&input)?.as_bytes())\n .transpose()?\n .map(|bytes| 
Cow::Borrowed(bytes.as_ref()));\n let text = self\n .args\n .text\n .as_ref()\n .map(|arg| arg.value(&input)?.as_str())\n .transpose()?;\n\n if text.is_none() && image_bytes.is_none() {\n api_bail!(\"At least one of `text` or `image` must be provided\");\n }\n\n let user_prompt = text.map_or(\"\", |v| v);\n let req = LlmGenerateRequest {\n model: &self.model,\n system_prompt: Some(Cow::Borrowed(&self.system_prompt)),\n user_prompt: Cow::Borrowed(user_prompt),\n image: image_bytes,\n output_format: Some(OutputFormat::JsonSchema {\n name: Cow::Borrowed(\"ExtractedData\"),\n schema: Cow::Borrowed(&self.output_json_schema),\n }),\n };\n let res = self.client.generate(req).await?;\n let json_value: serde_json::Value = serde_json::from_str(res.text.as_str())?;\n let value = self.value_extractor.extract_value(json_value)?;\n Ok(value)\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ExtractByLlm\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_optional_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n image: args_resolver\n .next_optional_arg(\"image\")?\n .expect_type(&ValueType::Basic(BasicValueType::Bytes))?,\n };\n\n if args.text.is_none() && args.image.is_none() {\n api_bail!(\"At least one of 'text' or 'image' must be provided\");\n }\n\n Ok((args, spec.output_type.clone()))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n resolved_input_schema: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor::new(spec, resolved_input_schema).await?))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This 
test requires an OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_extract_by_llm() {\n // Define the expected output structure\n let target_output_schema = StructSchema {\n fields: Arc::new(vec![\n FieldSchema::new(\n \"extracted_field_name\",\n make_output_type(BasicValueType::Str),\n ),\n FieldSchema::new(\n \"extracted_field_value\",\n make_output_type(BasicValueType::Int64),\n ),\n ]),\n description: Some(\"A test structure for extraction\".into()),\n };\n\n let output_type_spec = EnrichedValueType {\n typ: ValueType::Struct(target_output_schema.clone()),\n nullable: false,\n attrs: Arc::new(BTreeMap::new()),\n };\n\n let spec = Spec {\n llm_spec: LlmSpec {\n api_type: crate::llm::LlmApiType::OpenAi,\n model: \"gpt-4o\".to_string(),\n address: None,\n api_config: None,\n },\n output_type: output_type_spec,\n instruction: Some(\"Extract the name and value from the text. The name is a string, the value is an integer.\".to_string()),\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"The item is called 'CocoIndex Test' and its value is 42.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_extract_by_llm: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Struct(field_values) => {\n assert_eq!(\n field_values.fields.len(),\n target_output_schema.fields.len(),\n \"Mismatched number of fields in output struct\"\n );\n for (idx, field_schema) in target_output_schema.fields.iter().enumerate() {\n match (&field_values.fields[idx], &field_schema.value_type.typ) {\n (\n Value::Basic(BasicValue::Str(_)),\n ValueType::Basic(BasicValueType::Str),\n ) => {}\n (\n Value::Basic(BasicValue::Int64(_)),\n ValueType::Basic(BasicValueType::Int64),\n ) => {}\n (val, expected_type) => panic!(\n \"Field '{}' type mismatch. Got {:?}, expected type compatible with {:?}\",\n field_schema.name,\n val.kind(),\n expected_type\n ),\n }\n }\n }\n _ => panic!(\"Expected Value::Struct, got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/execution/source_indexer.rs", "use crate::{\n prelude::*,\n service::error::{SharedError, SharedResult, SharedResultExt},\n};\n\nuse futures::future::Ready;\nuse sqlx::PgPool;\nuse std::collections::{HashMap, hash_map};\nuse tokio::{sync::Semaphore, task::JoinSet};\n\nuse super::{\n db_tracking,\n evaluator::SourceRowEvaluationContext,\n row_indexer::{self, SkippedOr, SourceVersion},\n stats,\n};\n\nuse crate::ops::interface;\nstruct SourceRowIndexingState {\n source_version: SourceVersion,\n processing_sem: Arc,\n touched_generation: usize,\n}\n\nimpl Default for SourceRowIndexingState {\n fn default() -> Self {\n Self {\n source_version: SourceVersion::default(),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: 0,\n }\n }\n}\n\nstruct SourceIndexingState {\n rows: HashMap,\n scan_generation: usize,\n}\n\npub struct SourceIndexingContext {\n flow: Arc,\n source_idx: usize,\n pending_update: Mutex>>>>,\n update_sem: Semaphore,\n state: Mutex,\n setup_execution_ctx: Arc,\n}\n\npub const NO_ACK: Option Ready>> = None;\n\nimpl SourceIndexingContext {\n pub async fn load(\n flow: Arc,\n source_idx: 
usize,\n setup_execution_ctx: Arc,\n pool: &PgPool,\n ) -> Result {\n let plan = flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[source_idx];\n let mut list_state = db_tracking::ListTrackedSourceKeyMetadataState::new();\n let mut rows = HashMap::new();\n let scan_generation = 0;\n {\n let mut key_metadata_stream = list_state.list(\n setup_execution_ctx.import_ops[source_idx].source_id,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n while let Some(key_metadata) = key_metadata_stream.next().await {\n let key_metadata = key_metadata?;\n let source_key = value::Value::::from_json(\n key_metadata.source_key,\n &import_op.primary_key_type,\n )?\n .into_key()?;\n rows.insert(\n source_key,\n SourceRowIndexingState {\n source_version: SourceVersion::from_stored(\n key_metadata.processed_source_ordinal,\n &key_metadata.process_logic_fingerprint,\n plan.logic_fingerprint,\n ),\n processing_sem: Arc::new(Semaphore::new(1)),\n touched_generation: scan_generation,\n },\n );\n }\n }\n Ok(Self {\n flow,\n source_idx,\n state: Mutex::new(SourceIndexingState {\n rows,\n scan_generation,\n }),\n pending_update: Mutex::new(None),\n update_sem: Semaphore::new(1),\n setup_execution_ctx,\n })\n }\n\n pub async fn process_source_key<\n AckFut: Future> + Send + 'static,\n AckFn: FnOnce() -> AckFut,\n >(\n self: Arc,\n key: value::KeyValue,\n source_data: Option,\n update_stats: Arc,\n _concur_permit: concur_control::CombinedConcurrencyControllerPermit,\n ack_fn: Option,\n pool: PgPool,\n ) {\n let process = async {\n let plan = self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let schema = &self.flow.data_schema;\n let source_data = match source_data {\n Some(source_data) => source_data,\n None => import_op\n .executor\n .get_value(\n &key,\n &interface::SourceExecutorGetOptions {\n include_value: true,\n include_ordinal: true,\n },\n )\n .await?\n .try_into()?,\n };\n\n let source_version = 
SourceVersion::from_current_data(&source_data);\n let processing_sem = {\n let mut state = self.state.lock().unwrap();\n let touched_generation = state.scan_generation;\n match state.rows.entry(key.clone()) {\n hash_map::Entry::Occupied(mut entry) => {\n if entry\n .get()\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n return anyhow::Ok(());\n }\n let sem = entry.get().processing_sem.clone();\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n entry.remove();\n } else {\n entry.get_mut().source_version = source_version.clone();\n }\n sem\n }\n hash_map::Entry::Vacant(entry) => {\n if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n update_stats.num_no_change.inc(1);\n return anyhow::Ok(());\n }\n let new_entry = SourceRowIndexingState {\n source_version: source_version.clone(),\n touched_generation,\n ..Default::default()\n };\n let sem = new_entry.processing_sem.clone();\n entry.insert(new_entry);\n sem\n }\n }\n };\n\n let _processing_permit = processing_sem.acquire().await?;\n let result = row_indexer::update_source_row(\n &SourceRowEvaluationContext {\n plan: &plan,\n import_op,\n schema,\n key: &key,\n import_op_idx: self.source_idx,\n },\n &self.setup_execution_ctx,\n source_data.value,\n &source_version,\n &pool,\n &update_stats,\n )\n .await?;\n let target_source_version = if let SkippedOr::Skipped(existing_source_version) = result\n {\n Some(existing_source_version)\n } else if source_version.kind == row_indexer::SourceVersionKind::NonExistence {\n Some(source_version)\n } else {\n None\n };\n if let Some(target_source_version) = target_source_version {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let entry = state.rows.entry(key.clone());\n match entry {\n hash_map::Entry::Occupied(mut entry) => {\n if !entry\n .get()\n .source_version\n .should_skip(&target_source_version, None)\n {\n if target_source_version.kind\n == 
row_indexer::SourceVersionKind::NonExistence\n {\n entry.remove();\n } else {\n let mut_entry = entry.get_mut();\n mut_entry.source_version = target_source_version;\n mut_entry.touched_generation = scan_generation;\n }\n }\n }\n hash_map::Entry::Vacant(entry) => {\n if target_source_version.kind\n != row_indexer::SourceVersionKind::NonExistence\n {\n entry.insert(SourceRowIndexingState {\n source_version: target_source_version,\n touched_generation: scan_generation,\n ..Default::default()\n });\n }\n }\n }\n }\n anyhow::Ok(())\n };\n let process_and_ack = async {\n process.await?;\n if let Some(ack_fn) = ack_fn {\n ack_fn().await?;\n }\n anyhow::Ok(())\n };\n if let Err(e) = process_and_ack.await {\n update_stats.num_errors.inc(1);\n error!(\n \"{:?}\",\n e.context(format!(\n \"Error in processing row from source `{source}` with key: {key}\",\n source = self.flow.flow_instance.import_ops[self.source_idx].name\n ))\n );\n }\n }\n\n pub async fn update(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let pending_update_fut = {\n let mut pending_update = self.pending_update.lock().unwrap();\n if let Some(pending_update_fut) = &*pending_update {\n pending_update_fut.clone()\n } else {\n let slf = self.clone();\n let pool = pool.clone();\n let update_stats = update_stats.clone();\n let task = tokio::spawn(async move {\n {\n let _permit = slf.update_sem.acquire().await?;\n {\n let mut pending_update = slf.pending_update.lock().unwrap();\n *pending_update = None;\n }\n slf.update_once(&pool, &update_stats).await?;\n }\n anyhow::Ok(())\n });\n let pending_update_fut = async move {\n task.await\n .map_err(SharedError::from)?\n .map_err(SharedError::new)\n }\n .boxed()\n .shared();\n *pending_update = Some(pending_update_fut.clone());\n pending_update_fut\n }\n };\n pending_update_fut.await.std_result()?;\n Ok(())\n }\n\n async fn update_once(\n self: &Arc,\n pool: &PgPool,\n update_stats: &Arc,\n ) -> Result<()> {\n let plan = 
self.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[self.source_idx];\n let mut rows_stream = import_op\n .executor\n .list(&interface::SourceExecutorListOptions {\n include_ordinal: true,\n });\n let mut join_set = JoinSet::new();\n let scan_generation = {\n let mut state = self.state.lock().unwrap();\n state.scan_generation += 1;\n state.scan_generation\n };\n while let Some(row) = rows_stream.next().await {\n for row in row? {\n let source_version = SourceVersion::from_current_with_ordinal(\n row.ordinal\n .ok_or_else(|| anyhow::anyhow!(\"ordinal is not available\"))?,\n );\n {\n let mut state = self.state.lock().unwrap();\n let scan_generation = state.scan_generation;\n let row_state = state.rows.entry(row.key.clone()).or_default();\n row_state.touched_generation = scan_generation;\n if row_state\n .source_version\n .should_skip(&source_version, Some(update_stats.as_ref()))\n {\n continue;\n }\n }\n let concur_permit = import_op\n .concurrency_controller\n .acquire(concur_control::BYTES_UNKNOWN_YET)\n .await?;\n join_set.spawn(self.clone().process_source_key(\n row.key,\n None,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n let deleted_key_versions = {\n let mut deleted_key_versions = Vec::new();\n let state = self.state.lock().unwrap();\n for (key, row_state) in state.rows.iter() {\n if row_state.touched_generation < scan_generation {\n deleted_key_versions.push((key.clone(), row_state.source_version.ordinal));\n }\n }\n deleted_key_versions\n };\n for (key, source_ordinal) in deleted_key_versions {\n // If the source ordinal is unavailable, call without source ordinal so that another polling will be triggered to avoid out-of-order.\n let source_data = source_ordinal\n .is_available()\n .then(|| interface::SourceData {\n value: 
interface::SourceValue::NonExistence,\n ordinal: source_ordinal,\n });\n let concur_permit = import_op.concurrency_controller.acquire(Some(|| 0)).await?;\n join_set.spawn(self.clone().process_source_key(\n key,\n source_data,\n update_stats.clone(),\n concur_permit,\n NO_ACK,\n pool.clone(),\n ));\n }\n while let Some(result) = join_set.join_next().await {\n if let Err(e) = result {\n if !e.is_cancelled() {\n error!(\"{e:?}\");\n }\n }\n }\n\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/sources/local_file.rs", "use async_stream::try_stream;\nuse globset::{Glob, GlobSet, GlobSetBuilder};\nuse log::warn;\nuse std::borrow::Cow;\nuse std::path::Path;\nuse std::{path::PathBuf, sync::Arc};\n\nuse crate::base::field_attrs;\nuse crate::{fields_value, ops::sdk::*};\n\n#[derive(Debug, Deserialize)]\npub struct Spec {\n path: String,\n binary: bool,\n included_patterns: Option>,\n excluded_patterns: Option>,\n}\n\nstruct Executor {\n root_path: PathBuf,\n binary: bool,\n included_glob_set: Option,\n excluded_glob_set: Option,\n}\n\nimpl Executor {\n fn is_excluded(&self, path: impl AsRef + Copy) -> bool {\n self.excluded_glob_set\n .as_ref()\n .is_some_and(|glob_set| glob_set.is_match(path))\n }\n\n fn is_file_included(&self, path: impl AsRef + Copy) -> bool {\n self.included_glob_set\n .as_ref()\n .is_none_or(|glob_set| glob_set.is_match(path))\n && !self.is_excluded(path)\n }\n}\n\n#[async_trait]\nimpl SourceExecutor for Executor {\n fn list<'a>(\n &'a self,\n options: &'a SourceExecutorListOptions,\n ) -> BoxStream<'a, Result>> {\n let root_component_size = self.root_path.components().count();\n let mut dirs = Vec::new();\n dirs.push(Cow::Borrowed(&self.root_path));\n let mut new_dirs = Vec::new();\n try_stream! {\n while let Some(dir) = dirs.pop() {\n let mut entries = tokio::fs::read_dir(dir.as_ref()).await?;\n while let Some(entry) = entries.next_entry().await? 
{\n let path = entry.path();\n let mut path_components = path.components();\n for _ in 0..root_component_size {\n path_components.next();\n }\n let relative_path = path_components.as_path();\n if path.is_dir() {\n if !self.is_excluded(relative_path) {\n new_dirs.push(Cow::Owned(path));\n }\n } else if self.is_file_included(relative_path) {\n let ordinal: Option = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n if let Some(relative_path) = relative_path.to_str() {\n yield vec![PartialSourceRowMetadata {\n key: KeyValue::Str(relative_path.into()),\n ordinal,\n }];\n } else {\n warn!(\"Skipped ill-formed file path: {}\", path.display());\n }\n }\n }\n dirs.extend(new_dirs.drain(..).rev());\n }\n }\n .boxed()\n }\n\n async fn get_value(\n &self,\n key: &KeyValue,\n options: &SourceExecutorGetOptions,\n ) -> Result {\n if !self.is_file_included(key.str_value()?.as_ref()) {\n return Ok(PartialSourceRowData {\n value: Some(SourceValue::NonExistence),\n ordinal: Some(Ordinal::unavailable()),\n });\n }\n let path = self.root_path.join(key.str_value()?.as_ref());\n let ordinal = if options.include_ordinal {\n Some(path.metadata()?.modified()?.try_into()?)\n } else {\n None\n };\n let value = if options.include_value {\n match std::fs::read(path) {\n Ok(content) => {\n let content = if self.binary {\n fields_value!(content)\n } else {\n fields_value!(String::from_utf8_lossy(&content).to_string())\n };\n Some(SourceValue::Existence(content))\n }\n Err(e) if e.kind() == std::io::ErrorKind::NotFound => {\n Some(SourceValue::NonExistence)\n }\n Err(e) => Err(e)?,\n }\n } else {\n None\n };\n Ok(PartialSourceRowData { value, ordinal })\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SourceFactoryBase for Factory {\n type Spec = Spec;\n\n fn name(&self) -> &str {\n \"LocalFile\"\n }\n\n async fn get_output_schema(\n &self,\n spec: &Spec,\n _context: &FlowInstanceContext,\n ) -> Result {\n let mut struct_schema = 
StructSchema::default();\n let mut schema_builder = StructSchemaBuilder::new(&mut struct_schema);\n let filename_field = schema_builder.add_field(FieldSchema::new(\n \"filename\",\n make_output_type(BasicValueType::Str),\n ));\n schema_builder.add_field(FieldSchema::new(\n \"content\",\n make_output_type(if spec.binary {\n BasicValueType::Bytes\n } else {\n BasicValueType::Str\n })\n .with_attr(\n field_attrs::CONTENT_FILENAME,\n serde_json::to_value(filename_field.to_field_ref())?,\n ),\n ));\n\n Ok(make_output_type(TableSchema::new(\n TableKind::KTable,\n struct_schema,\n )))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor {\n root_path: PathBuf::from(spec.path),\n binary: spec.binary,\n included_glob_set: spec.included_patterns.map(build_glob_set).transpose()?,\n excluded_glob_set: spec.excluded_patterns.map(build_glob_set).transpose()?,\n }))\n }\n}\n\nfn build_glob_set(patterns: Vec) -> Result {\n let mut builder = GlobSetBuilder::new();\n for pattern in patterns {\n builder.add(Glob::new(pattern.as_str())?);\n }\n Ok(builder.build()?)\n}\n"], ["/cocoindex/src/service/flows.rs", "use crate::prelude::*;\n\nuse crate::execution::{evaluator, indexing_status, memoization, row_indexer, stats};\nuse crate::lib_context::LibContext;\nuse crate::{base::schema::FlowSchema, ops::interface::SourceExecutorListOptions};\nuse axum::{\n Json,\n extract::{Path, State},\n http::StatusCode,\n};\nuse axum_extra::extract::Query;\n\npub async fn list_flows(\n State(lib_context): State>,\n) -> Result>, ApiError> {\n Ok(Json(\n lib_context.flows.lock().unwrap().keys().cloned().collect(),\n ))\n}\n\npub async fn get_flow_schema(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n Ok(Json(flow_ctx.flow.data_schema.clone()))\n}\n\n#[derive(Serialize)]\npub struct GetFlowResponse {\n flow_spec: spec::FlowInstanceSpec,\n 
data_schema: FlowSchema,\n fingerprint: utils::fingerprint::Fingerprint,\n}\n\npub async fn get_flow(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let flow_spec = flow_ctx.flow.flow_instance.clone();\n let data_schema = flow_ctx.flow.data_schema.clone();\n let fingerprint = utils::fingerprint::Fingerprinter::default()\n .with(&flow_spec)\n .map_err(|e| api_error!(\"failed to fingerprint flow spec: {e}\"))?\n .with(&data_schema)\n .map_err(|e| api_error!(\"failed to fingerprint data schema: {e}\"))?\n .into_fingerprint();\n Ok(Json(GetFlowResponse {\n flow_spec,\n data_schema,\n fingerprint,\n }))\n}\n\n#[derive(Deserialize)]\npub struct GetKeysParam {\n field: String,\n}\n\n#[derive(Serialize)]\npub struct GetKeysResponse {\n key_type: schema::EnrichedValueType,\n keys: Vec,\n}\n\npub async fn get_keys(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let schema = &flow_ctx.flow.data_schema;\n\n let field_idx = schema\n .fields\n .iter()\n .position(|f| f.name == query.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field not found: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let key_type = schema.fields[field_idx]\n .value_type\n .typ\n .key_type()\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field has no key: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let execution_plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = execution_plan\n .import_ops\n .iter()\n .find(|op| op.output.field_idx == field_idx as u32)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"field is not a source: {}\", query.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n\n let mut rows_stream = import_op.executor.list(&SourceExecutorListOptions {\n include_ordinal: false,\n });\n let mut keys = Vec::new();\n while let 
Some(rows) = rows_stream.next().await {\n keys.extend(rows?.into_iter().map(|row| row.key));\n }\n Ok(Json(GetKeysResponse {\n key_type: key_type.clone(),\n keys,\n }))\n}\n\n#[derive(Deserialize)]\npub struct SourceRowKeyParams {\n field: String,\n key: Vec,\n}\n\n#[derive(Serialize)]\npub struct EvaluateDataResponse {\n schema: FlowSchema,\n data: value::ScopeValue,\n}\n\nstruct SourceRowKeyContextHolder<'a> {\n plan: Arc,\n import_op_idx: usize,\n schema: &'a FlowSchema,\n key: value::KeyValue,\n}\n\nimpl<'a> SourceRowKeyContextHolder<'a> {\n async fn create(flow_ctx: &'a FlowContext, source_row_key: SourceRowKeyParams) -> Result {\n let schema = &flow_ctx.flow.data_schema;\n let import_op_idx = flow_ctx\n .flow\n .flow_instance\n .import_ops\n .iter()\n .position(|op| op.name == source_row_key.field)\n .ok_or_else(|| {\n ApiError::new(\n &format!(\"source field not found: {}\", source_row_key.field),\n StatusCode::BAD_REQUEST,\n )\n })?;\n let plan = flow_ctx.flow.get_execution_plan().await?;\n let import_op = &plan.import_ops[import_op_idx];\n let field_schema = &schema.fields[import_op.output.field_idx as usize];\n let table_schema = match &field_schema.value_type.typ {\n schema::ValueType::Table(table) => table,\n _ => api_bail!(\"field is not a table: {}\", source_row_key.field),\n };\n let key_field = table_schema\n .key_field()\n .ok_or_else(|| api_error!(\"field {} does not have a key\", source_row_key.field))?;\n let key = value::KeyValue::from_strs(source_row_key.key, &key_field.value_type.typ)?;\n Ok(Self {\n plan,\n import_op_idx,\n schema,\n key,\n })\n }\n\n fn as_context<'b>(&'b self) -> evaluator::SourceRowEvaluationContext<'b> {\n evaluator::SourceRowEvaluationContext {\n plan: &self.plan,\n import_op: &self.plan.import_ops[self.import_op_idx],\n schema: self.schema,\n key: &self.key,\n import_op_idx: self.import_op_idx,\n }\n }\n}\n\npub async fn evaluate_data(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> 
Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let evaluate_output = row_indexer::evaluate_source_entry_with_memory(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n memoization::EvaluationMemoryOptions {\n enable_cache: true,\n evaluation_only: true,\n },\n lib_context.require_builtin_db_pool()?,\n )\n .await?\n .ok_or_else(|| {\n api_error!(\n \"value not found for source at the specified key: {key:?}\",\n key = source_row_key_ctx.key\n )\n })?;\n\n Ok(Json(EvaluateDataResponse {\n schema: flow_ctx.flow.data_schema.clone(),\n data: evaluate_output.data_scope.into(),\n }))\n}\n\npub async fn update(\n Path(flow_name): Path,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let live_updater = execution::FlowLiveUpdater::start(\n flow_ctx.clone(),\n lib_context.require_builtin_db_pool()?,\n execution::FlowLiveUpdaterOptions {\n live_mode: false,\n ..Default::default()\n },\n )\n .await?;\n live_updater.wait().await?;\n Ok(Json(live_updater.index_update_info()))\n}\n\npub async fn get_row_indexing_status(\n Path(flow_name): Path,\n Query(query): Query,\n State(lib_context): State>,\n) -> Result, ApiError> {\n let flow_ctx = lib_context.get_flow_context(&flow_name)?;\n let source_row_key_ctx = SourceRowKeyContextHolder::create(&flow_ctx, query).await?;\n\n let execution_ctx = flow_ctx.use_execution_ctx().await?;\n let indexing_status = indexing_status::get_source_row_indexing_status(\n &source_row_key_ctx.as_context(),\n &execution_ctx.setup_execution_context,\n lib_context.require_builtin_db_pool()?,\n )\n .await?;\n Ok(Json(indexing_status))\n}\n"], ["/cocoindex/src/llm/gemini.rs", "use crate::prelude::*;\n\nuse crate::llm::{\n LlmEmbeddingClient, LlmGenerateRequest, LlmGenerateResponse, 
LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse base64::prelude::*;\nuse google_cloud_aiplatform_v1 as vertexai;\nuse serde_json::Value;\nuse urlencoding::encode;\n\nfn get_embedding_dimension(model: &str) -> Option {\n let model = model.to_ascii_lowercase();\n if model.starts_with(\"gemini-embedding-\") {\n Some(3072)\n } else if model.starts_with(\"text-embedding-\") {\n Some(768)\n } else if model.starts_with(\"embedding-\") {\n Some(768)\n } else if model.starts_with(\"text-multilingual-embedding-\") {\n Some(768)\n } else {\n None\n }\n}\n\npub struct AiStudioClient {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl AiStudioClient {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Gemini doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"GEMINI_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"GEMINI_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n// Recursively remove all `additionalProperties` fields from a JSON value\nfn remove_additional_properties(value: &mut Value) {\n match value {\n Value::Object(map) => {\n map.remove(\"additionalProperties\");\n for v in map.values_mut() {\n remove_additional_properties(v);\n }\n }\n Value::Array(arr) => {\n for v in arr {\n remove_additional_properties(v);\n }\n }\n _ => {}\n }\n}\n\nimpl AiStudioClient {\n fn get_api_url(&self, model: &str, api_name: &str) -> String {\n format!(\n \"https://generativelanguage.googleapis.com/v1beta/models/{}:{}?key={}\",\n encode(model),\n api_name,\n encode(&self.api_key)\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for AiStudioClient {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_parts: Vec = Vec::new();\n\n // Add text part first\n user_parts.push(serde_json::json!({ \"text\": request.user_prompt }));\n\n // Add image part 
if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_parts.push(serde_json::json!({\n \"inlineData\": {\n \"mimeType\": mime_type,\n \"data\": base64_image\n }\n }));\n }\n\n // Compose the contents\n let contents = vec![serde_json::json!({\n \"role\": \"user\",\n \"parts\": user_parts\n })];\n\n // Prepare payload\n let mut payload = serde_json::json!({ \"contents\": contents });\n if let Some(system) = request.system_prompt {\n payload[\"systemInstruction\"] = serde_json::json!({\n \"parts\": [ { \"text\": system } ]\n });\n }\n\n // If structured output is requested, add schema and responseMimeType\n if let Some(OutputFormat::JsonSchema { schema, .. }) = &request.output_format {\n let mut schema_json = serde_json::to_value(schema)?;\n remove_additional_properties(&mut schema_json);\n payload[\"generationConfig\"] = serde_json::json!({\n \"responseMimeType\": \"application/json\",\n \"responseSchema\": schema_json\n });\n }\n\n let url = self.get_api_url(request.model, \"generateContent\");\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let resp_json: Value = resp.json().await.context(\"Invalid JSON\")?;\n\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Gemini API error: {:?}\", error);\n }\n let mut resp_json = resp_json;\n let text = match &mut resp_json[\"candidates\"][0][\"content\"][\"parts\"][0][\"text\"] {\n Value::String(s) => std::mem::take(s),\n _ => bail!(\"No text in response\"),\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: 
false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[derive(Deserialize)]\nstruct ContentEmbedding {\n values: Vec,\n}\n#[derive(Deserialize)]\nstruct EmbedContentResponse {\n embedding: ContentEmbedding,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for AiStudioClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = self.get_api_url(request.model, \"embedContent\");\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"content\": { \"parts\": [{ \"text\": request.text }] },\n });\n if let Some(task_type) = request.task_type {\n payload[\"taskType\"] = serde_json::Value::String(task_type.into());\n }\n let resp = retryable::run(\n || self.client.post(&url).json(&payload).send(),\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Gemini API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: EmbedContentResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n\npub struct VertexAiClient {\n client: vertexai::client::PredictionService,\n config: super::VertexAiConfig,\n}\n\nimpl VertexAiClient {\n pub async fn new(\n address: Option,\n api_config: Option,\n ) -> Result {\n if address.is_some() {\n api_bail!(\"VertexAi API address is not supported for VertexAi API type\");\n }\n let Some(super::LlmApiConfig::VertexAi(config)) = api_config else {\n api_bail!(\"VertexAi API config is required for VertexAi API type\");\n };\n let client = vertexai::client::PredictionService::builder()\n .build()\n .await?;\n Ok(Self { client, config })\n }\n\n fn get_model_path(&self, model: &str) -> String {\n format!(\n \"projects/{}/locations/{}/publishers/google/models/{}\",\n self.config.project,\n 
self.config.region.as_deref().unwrap_or(\"global\"),\n model\n )\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for VertexAiClient {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n use vertexai::model::{Blob, Content, GenerationConfig, Part, Schema, part::Data};\n\n // Compose parts\n let mut parts = Vec::new();\n // Add text part\n parts.push(Part::new().set_text(request.user_prompt.to_string()));\n // Add image part if present\n if let Some(image_bytes) = request.image {\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n parts.push(\n Part::new().set_inline_data(\n Blob::new()\n .set_data(image_bytes.into_owned())\n .set_mime_type(mime_type.to_string()),\n ),\n );\n }\n // Compose content\n let mut contents = Vec::new();\n contents.push(Content::new().set_role(\"user\".to_string()).set_parts(parts));\n // Compose system instruction if present\n let system_instruction = request.system_prompt.as_ref().map(|sys| {\n Content::new()\n .set_role(\"system\".to_string())\n .set_parts(vec![Part::new().set_text(sys.to_string())])\n });\n\n // Compose generation config\n let mut generation_config = None;\n if let Some(OutputFormat::JsonSchema { schema, .. 
}) = &request.output_format {\n let schema_json = serde_json::to_value(schema)?;\n generation_config = Some(\n GenerationConfig::new()\n .set_response_mime_type(\"application/json\".to_string())\n .set_response_schema(serde_json::from_value::(schema_json)?),\n );\n }\n\n let mut req = self\n .client\n .generate_content()\n .set_model(self.get_model_path(request.model))\n .set_contents(contents);\n if let Some(sys) = system_instruction {\n req = req.set_system_instruction(sys);\n }\n if let Some(config) = generation_config {\n req = req.set_generation_config(config);\n }\n\n // Call the API\n let resp = req.send().await?;\n // Extract text from response\n let Some(Data::Text(text)) = resp\n .candidates\n .into_iter()\n .next()\n .and_then(|c| c.content)\n .and_then(|content| content.parts.into_iter().next())\n .and_then(|part| part.data)\n else {\n bail!(\"No text in response\");\n };\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for VertexAiClient {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n // Create the instances for the request\n let mut instance = serde_json::json!({\n \"content\": request.text\n });\n // Add task type if specified\n if let Some(task_type) = &request.task_type {\n instance[\"task_type\"] = serde_json::Value::String(task_type.to_string());\n }\n\n let instances = vec![instance];\n\n // Prepare the request parameters\n let mut parameters = serde_json::json!({});\n if let Some(output_dimension) = request.output_dimension {\n parameters[\"outputDimensionality\"] = serde_json::Value::Number(output_dimension.into());\n }\n\n // Build the prediction request using the raw predict builder\n let response = self\n .client\n .predict()\n 
.set_endpoint(self.get_model_path(request.model))\n .set_instances(instances)\n .set_parameters(parameters)\n .send()\n .await?;\n\n // Extract the embedding from the response\n let embeddings = response\n .predictions\n .into_iter()\n .next()\n .and_then(|mut e| e.get_mut(\"embeddings\").map(|v| v.take()))\n .ok_or_else(|| anyhow::anyhow!(\"No embeddings in response\"))?;\n let embedding: ContentEmbedding = serde_json::from_value(embeddings)?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding.values,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/utils/yaml_ser.rs", "use base64::prelude::*;\nuse serde::ser::{self, Serialize};\nuse yaml_rust2::yaml::Yaml;\n\n#[derive(Debug)]\npub struct YamlSerializerError {\n msg: String,\n}\n\nimpl std::fmt::Display for YamlSerializerError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"YamlSerializerError: {}\", self.msg)\n }\n}\n\nimpl std::error::Error for YamlSerializerError {}\n\nimpl ser::Error for YamlSerializerError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n YamlSerializerError {\n msg: format!(\"{msg}\"),\n }\n }\n}\n\npub struct YamlSerializer;\n\nimpl YamlSerializer {\n pub fn serialize(value: &T) -> Result\n where\n T: Serialize,\n {\n value.serialize(YamlSerializer)\n }\n}\n\nimpl ser::Serializer for YamlSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n type SerializeSeq = SeqSerializer;\n type SerializeTuple = SeqSerializer;\n type SerializeTupleStruct = SeqSerializer;\n type SerializeTupleVariant = VariantSeqSerializer;\n type SerializeMap = MapSerializer;\n type SerializeStruct = MapSerializer;\n type SerializeStructVariant = VariantMapSerializer;\n\n fn serialize_bool(self, v: bool) -> Result {\n Ok(Yaml::Boolean(v))\n }\n\n fn serialize_i8(self, v: i8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i16(self, v: 
i16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i32(self, v: i32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_i64(self, v: i64) -> Result {\n Ok(Yaml::Integer(v))\n }\n\n fn serialize_u8(self, v: u8) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u16(self, v: u16) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u32(self, v: u32) -> Result {\n Ok(Yaml::Integer(v as i64))\n }\n\n fn serialize_u64(self, v: u64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f32(self, v: f32) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_f64(self, v: f64) -> Result {\n Ok(Yaml::Real(v.to_string()))\n }\n\n fn serialize_char(self, v: char) -> Result {\n Ok(Yaml::String(v.to_string()))\n }\n\n fn serialize_str(self, v: &str) -> Result {\n Ok(Yaml::String(v.to_owned()))\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result {\n let encoded = BASE64_STANDARD.encode(v);\n Ok(Yaml::String(encoded))\n }\n\n fn serialize_none(self) -> Result {\n Ok(Yaml::Null)\n }\n\n fn serialize_some(self, value: &T) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_struct(self, _name: &'static str) -> Result {\n Ok(Yaml::Hash(Default::default()))\n }\n\n fn serialize_unit_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result {\n Ok(Yaml::String(variant.to_owned()))\n }\n\n fn serialize_newtype_struct(\n self,\n _name: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result\n where\n T: Serialize + ?Sized,\n {\n let mut hash = yaml_rust2::yaml::Hash::new();\n hash.insert(Yaml::String(variant.to_owned()), value.serialize(self)?);\n 
Ok(Yaml::Hash(hash))\n }\n\n fn serialize_seq(self, len: Option) -> Result {\n Ok(SeqSerializer {\n vec: Vec::with_capacity(len.unwrap_or(0)),\n })\n }\n\n fn serialize_tuple(self, len: usize) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_seq(Some(len))\n }\n\n fn serialize_tuple_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n len: usize,\n ) -> Result {\n Ok(VariantSeqSerializer {\n variant_name: variant.to_owned(),\n vec: Vec::with_capacity(len),\n })\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n Ok(MapSerializer {\n map: yaml_rust2::yaml::Hash::new(),\n next_key: None,\n })\n }\n\n fn serialize_struct(\n self,\n _name: &'static str,\n len: usize,\n ) -> Result {\n self.serialize_map(Some(len))\n }\n\n fn serialize_struct_variant(\n self,\n _name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n Ok(VariantMapSerializer {\n variant_name: variant.to_owned(),\n map: yaml_rust2::yaml::Hash::new(),\n })\n }\n}\n\npub struct SeqSerializer {\n vec: Vec,\n}\n\nimpl ser::SerializeSeq for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Array(self.vec))\n }\n}\n\nimpl ser::SerializeTuple for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\nimpl ser::SerializeTupleStruct for SeqSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, 
value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeSeq::serialize_element(self, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeSeq::end(self)\n }\n}\n\npub struct MapSerializer {\n map: yaml_rust2::yaml::Hash,\n next_key: Option,\n}\n\nimpl ser::SerializeMap for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.next_key = Some(key.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n let key = self.next_key.take().unwrap();\n self.map.insert(key, value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n Ok(Yaml::Hash(self.map))\n }\n}\n\nimpl ser::SerializeStruct for MapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n ser::SerializeMap::serialize_entry(self, key, value)\n }\n\n fn end(self) -> Result {\n ser::SerializeMap::end(self)\n }\n}\n\npub struct VariantMapSerializer {\n variant_name: String,\n map: yaml_rust2::yaml::Hash,\n}\n\nimpl ser::SerializeStructVariant for VariantMapSerializer {\n type Ok = Yaml;\n type Error = YamlSerializerError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.map.insert(\n Yaml::String(key.to_owned()),\n value.serialize(YamlSerializer)?,\n );\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut outer_map = yaml_rust2::yaml::Hash::new();\n outer_map.insert(Yaml::String(self.variant_name), Yaml::Hash(self.map));\n Ok(Yaml::Hash(outer_map))\n }\n}\n\npub struct VariantSeqSerializer {\n variant_name: String,\n vec: Vec,\n}\n\nimpl ser::SerializeTupleVariant for VariantSeqSerializer {\n type Ok = Yaml;\n type 
Error = YamlSerializerError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: Serialize + ?Sized,\n {\n self.vec.push(value.serialize(YamlSerializer)?);\n Ok(())\n }\n\n fn end(self) -> Result {\n let mut map = yaml_rust2::yaml::Hash::new();\n map.insert(Yaml::String(self.variant_name), Yaml::Array(self.vec));\n Ok(Yaml::Hash(map))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use serde::ser::Error as SerdeSerError;\n use serde::{Serialize, Serializer};\n use std::collections::BTreeMap;\n use yaml_rust2::yaml::{Hash, Yaml};\n\n fn assert_yaml_serialization(value: T, expected_yaml: Yaml) {\n let result = YamlSerializer::serialize(&value);\n println!(\"Serialized value: {result:?}, Expected value: {expected_yaml:?}\");\n\n assert!(\n result.is_ok(),\n \"Serialization failed when it should have succeeded. Error: {:?}\",\n result.err()\n );\n assert_eq!(\n result.unwrap(),\n expected_yaml,\n \"Serialized YAML did not match expected YAML.\"\n );\n }\n\n #[test]\n fn test_serialize_bool() {\n assert_yaml_serialization(true, Yaml::Boolean(true));\n assert_yaml_serialization(false, Yaml::Boolean(false));\n }\n\n #[test]\n fn test_serialize_integers() {\n assert_yaml_serialization(42i8, Yaml::Integer(42));\n assert_yaml_serialization(-100i16, Yaml::Integer(-100));\n assert_yaml_serialization(123456i32, Yaml::Integer(123456));\n assert_yaml_serialization(7890123456789i64, Yaml::Integer(7890123456789));\n assert_yaml_serialization(255u8, Yaml::Integer(255));\n assert_yaml_serialization(65535u16, Yaml::Integer(65535));\n assert_yaml_serialization(4000000000u32, Yaml::Integer(4000000000));\n // u64 is serialized as Yaml::Real(String) in your implementation\n assert_yaml_serialization(\n 18446744073709551615u64,\n Yaml::Real(\"18446744073709551615\".to_string()),\n );\n }\n\n #[test]\n fn test_serialize_floats() {\n assert_yaml_serialization(3.14f32, Yaml::Real(\"3.14\".to_string()));\n assert_yaml_serialization(-0.001f64, 
Yaml::Real(\"-0.001\".to_string()));\n assert_yaml_serialization(1.0e10f64, Yaml::Real(\"10000000000\".to_string()));\n }\n\n #[test]\n fn test_serialize_char() {\n assert_yaml_serialization('X', Yaml::String(\"X\".to_string()));\n assert_yaml_serialization('✨', Yaml::String(\"✨\".to_string()));\n }\n\n #[test]\n fn test_serialize_str_and_string() {\n assert_yaml_serialization(\"hello YAML\", Yaml::String(\"hello YAML\".to_string()));\n assert_yaml_serialization(\"\".to_string(), Yaml::String(\"\".to_string()));\n }\n\n #[test]\n fn test_serialize_raw_bytes() {\n let bytes_slice: &[u8] = &[0x48, 0x65, 0x6c, 0x6c, 0x6f]; // \"Hello\"\n let expected = Yaml::Array(vec![\n Yaml::Integer(72),\n Yaml::Integer(101),\n Yaml::Integer(108),\n Yaml::Integer(108),\n Yaml::Integer(111),\n ]);\n assert_yaml_serialization(bytes_slice, expected.clone());\n\n let bytes_vec: Vec = bytes_slice.to_vec();\n assert_yaml_serialization(bytes_vec, expected);\n\n let empty_bytes_slice: &[u8] = &[];\n assert_yaml_serialization(empty_bytes_slice, Yaml::Array(vec![]));\n }\n\n struct MyBytesWrapper<'a>(&'a [u8]);\n\n impl<'a> Serialize for MyBytesWrapper<'a> {\n fn serialize(&self, serializer: S) -> Result\n where\n S: Serializer,\n {\n serializer.serialize_bytes(self.0)\n }\n }\n\n #[test]\n fn test_custom_wrapper_serializes_bytes_as_base64_string() {\n let data: &[u8] = &[72, 101, 108, 108, 111]; // \"Hello\"\n let wrapped_data = MyBytesWrapper(data);\n\n let base64_encoded = BASE64_STANDARD.encode(data);\n let expected_yaml = Yaml::String(base64_encoded);\n\n assert_yaml_serialization(wrapped_data, expected_yaml);\n\n let empty_data: &[u8] = &[];\n let wrapped_empty_data = MyBytesWrapper(empty_data);\n let empty_base64_encoded = BASE64_STANDARD.encode(empty_data);\n let expected_empty_yaml = Yaml::String(empty_base64_encoded);\n assert_yaml_serialization(wrapped_empty_data, expected_empty_yaml);\n }\n\n #[test]\n fn test_serialize_option() {\n let val_none: Option = None;\n 
assert_yaml_serialization(val_none, Yaml::Null);\n\n let val_some: Option = Some(\"has value\".to_string());\n assert_yaml_serialization(val_some, Yaml::String(\"has value\".to_string()));\n }\n\n #[test]\n fn test_serialize_unit() {\n assert_yaml_serialization((), Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_unit_struct() {\n #[derive(Serialize)]\n struct MyUnitStruct;\n\n assert_yaml_serialization(MyUnitStruct, Yaml::Hash(Hash::new()));\n }\n\n #[test]\n fn test_serialize_newtype_struct() {\n #[derive(Serialize)]\n struct MyNewtypeStruct(u64);\n\n assert_yaml_serialization(MyNewtypeStruct(12345u64), Yaml::Real(\"12345\".to_string()));\n }\n\n #[test]\n fn test_serialize_seq() {\n let empty_vec: Vec = vec![];\n assert_yaml_serialization(empty_vec, Yaml::Array(vec![]));\n\n let simple_vec = vec![10, 20, 30];\n assert_yaml_serialization(\n simple_vec,\n Yaml::Array(vec![\n Yaml::Integer(10),\n Yaml::Integer(20),\n Yaml::Integer(30),\n ]),\n );\n\n let string_vec = vec![\"a\".to_string(), \"b\".to_string()];\n assert_yaml_serialization(\n string_vec,\n Yaml::Array(vec![\n Yaml::String(\"a\".to_string()),\n Yaml::String(\"b\".to_string()),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple() {\n let tuple_val = (42i32, \"text\", false);\n assert_yaml_serialization(\n tuple_val,\n Yaml::Array(vec![\n Yaml::Integer(42),\n Yaml::String(\"text\".to_string()),\n Yaml::Boolean(false),\n ]),\n );\n }\n\n #[test]\n fn test_serialize_tuple_struct() {\n #[derive(Serialize)]\n struct MyTupleStruct(String, i64);\n\n assert_yaml_serialization(\n MyTupleStruct(\"value\".to_string(), -500),\n Yaml::Array(vec![Yaml::String(\"value\".to_string()), Yaml::Integer(-500)]),\n );\n }\n\n #[test]\n fn test_serialize_map() {\n let mut map = BTreeMap::new(); // BTreeMap for ordered keys, matching yaml::Hash\n map.insert(\"key1\".to_string(), 100);\n map.insert(\"key2\".to_string(), 200);\n\n let mut expected_hash = Hash::new();\n 
expected_hash.insert(Yaml::String(\"key1\".to_string()), Yaml::Integer(100));\n expected_hash.insert(Yaml::String(\"key2\".to_string()), Yaml::Integer(200));\n assert_yaml_serialization(map, Yaml::Hash(expected_hash));\n\n let empty_map: BTreeMap = BTreeMap::new();\n assert_yaml_serialization(empty_map, Yaml::Hash(Hash::new()));\n }\n\n #[derive(Serialize)]\n struct SimpleStruct {\n id: u32,\n name: String,\n is_active: bool,\n }\n\n #[test]\n fn test_serialize_struct() {\n let s = SimpleStruct {\n id: 101,\n name: \"A Struct\".to_string(),\n is_active: true,\n };\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(101));\n expected_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"A Struct\".to_string()),\n );\n expected_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(true));\n assert_yaml_serialization(s, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n struct NestedStruct {\n description: String,\n data: SimpleStruct,\n tags: Vec,\n }\n\n #[test]\n fn test_serialize_nested_struct() {\n let ns = NestedStruct {\n description: \"Contains another struct and a vec\".to_string(),\n data: SimpleStruct {\n id: 202,\n name: \"Inner\".to_string(),\n is_active: false,\n },\n tags: vec![\"nested\".to_string(), \"complex\".to_string()],\n };\n\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"id\".to_string()), Yaml::Integer(202));\n inner_struct_hash.insert(\n Yaml::String(\"name\".to_string()),\n Yaml::String(\"Inner\".to_string()),\n );\n inner_struct_hash.insert(Yaml::String(\"is_active\".to_string()), Yaml::Boolean(false));\n\n let tags_array = Yaml::Array(vec![\n Yaml::String(\"nested\".to_string()),\n Yaml::String(\"complex\".to_string()),\n ]);\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"description\".to_string()),\n Yaml::String(\"Contains another struct and a vec\".to_string()),\n );\n 
expected_hash.insert(\n Yaml::String(\"data\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n expected_hash.insert(Yaml::String(\"tags\".to_string()), tags_array);\n\n assert_yaml_serialization(ns, Yaml::Hash(expected_hash));\n }\n\n #[derive(Serialize)]\n enum MyEnum {\n Unit,\n Newtype(i32),\n Tuple(String, bool),\n Struct { field_a: u16, field_b: char },\n }\n\n #[test]\n fn test_serialize_enum_unit_variant() {\n assert_yaml_serialization(MyEnum::Unit, Yaml::String(\"Unit\".to_string()));\n }\n\n #[test]\n fn test_serialize_enum_newtype_variant() {\n let mut expected_hash = Hash::new();\n expected_hash.insert(Yaml::String(\"Newtype\".to_string()), Yaml::Integer(999));\n assert_yaml_serialization(MyEnum::Newtype(999), Yaml::Hash(expected_hash));\n }\n\n #[test]\n fn test_serialize_enum_tuple_variant() {\n let mut expected_hash = Hash::new();\n let inner_array = Yaml::Array(vec![\n Yaml::String(\"tuple_data\".to_string()),\n Yaml::Boolean(true),\n ]);\n expected_hash.insert(Yaml::String(\"Tuple\".to_string()), inner_array);\n assert_yaml_serialization(\n MyEnum::Tuple(\"tuple_data\".to_string(), true),\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_serialize_enum_struct_variant() {\n let mut inner_struct_hash = Hash::new();\n inner_struct_hash.insert(Yaml::String(\"field_a\".to_string()), Yaml::Integer(123));\n inner_struct_hash.insert(\n Yaml::String(\"field_b\".to_string()),\n Yaml::String(\"Z\".to_string()),\n );\n\n let mut expected_hash = Hash::new();\n expected_hash.insert(\n Yaml::String(\"Struct\".to_string()),\n Yaml::Hash(inner_struct_hash),\n );\n assert_yaml_serialization(\n MyEnum::Struct {\n field_a: 123,\n field_b: 'Z',\n },\n Yaml::Hash(expected_hash),\n );\n }\n\n #[test]\n fn test_yaml_serializer_error_display() {\n let error = YamlSerializerError {\n msg: \"A test error message\".to_string(),\n };\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: A test error message\"\n );\n }\n\n #[test]\n fn 
test_yaml_serializer_error_custom() {\n let error = YamlSerializerError::custom(\"Custom error detail\");\n assert_eq!(error.msg, \"Custom error detail\");\n assert_eq!(\n format!(\"{error}\"),\n \"YamlSerializerError: Custom error detail\"\n );\n let _err_trait_obj: Box = Box::new(error);\n }\n}\n"], ["/cocoindex/src/execution/db_tracking.rs", "use crate::prelude::*;\n\nuse super::{db_tracking_setup::TrackingTableSetupState, memoization::StoredMemoizationInfo};\nuse crate::utils::{db::WriteAction, fingerprint::Fingerprint};\nuse futures::Stream;\nuse serde::de::{self, Deserializer, SeqAccess, Visitor};\nuse serde::ser::SerializeSeq;\nuse sqlx::PgPool;\nuse std::fmt;\n\n#[derive(Debug, Clone)]\npub struct TrackedTargetKeyInfo {\n pub key: serde_json::Value,\n pub additional_key: serde_json::Value,\n pub process_ordinal: i64,\n pub fingerprint: Option,\n}\n\nimpl Serialize for TrackedTargetKeyInfo {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n let mut seq = serializer.serialize_seq(None)?;\n seq.serialize_element(&self.key)?;\n seq.serialize_element(&self.process_ordinal)?;\n seq.serialize_element(&self.fingerprint)?;\n if !self.additional_key.is_null() {\n seq.serialize_element(&self.additional_key)?;\n }\n seq.end()\n }\n}\n\nimpl<'de> serde::Deserialize<'de> for TrackedTargetKeyInfo {\n fn deserialize(deserializer: D) -> Result\n where\n D: Deserializer<'de>,\n {\n struct TrackedTargetKeyVisitor;\n\n impl<'de> Visitor<'de> for TrackedTargetKeyVisitor {\n type Value = TrackedTargetKeyInfo;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"a sequence of 3 or 4 elements for TrackedTargetKey\")\n }\n\n fn visit_seq(self, mut seq: A) -> Result\n where\n A: SeqAccess<'de>,\n {\n let target_key: serde_json::Value = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(0, &self))?;\n let process_ordinal: i64 = seq\n .next_element()?\n .ok_or_else(|| 
de::Error::invalid_length(1, &self))?;\n let fingerprint: Option = seq\n .next_element()?\n .ok_or_else(|| de::Error::invalid_length(2, &self))?;\n let additional_key: Option = seq.next_element()?;\n\n Ok(TrackedTargetKeyInfo {\n key: target_key,\n process_ordinal,\n fingerprint,\n additional_key: additional_key.unwrap_or(serde_json::Value::Null),\n })\n }\n }\n\n deserializer.deserialize_seq(TrackedTargetKeyVisitor)\n }\n}\n\n/// (source_id, target_key)\npub type TrackedTargetKeyForSource = Vec<(i32, Vec)>;\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForProcessing {\n pub memoization_info: Option>>,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub max_process_ordinal: Option,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_processing(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT memoization_info, processed_source_ordinal, process_logic_fingerprint, max_process_ordinal, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n\n Ok(tracking_info)\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForPrecommit {\n pub max_process_ordinal: i64,\n pub staging_target_keys: sqlx::types::Json,\n\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_ordinal: Option,\n pub target_keys: Option>,\n}\n\npub async fn read_source_tracking_info_for_precommit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT max_process_ordinal, staging_target_keys, processed_source_ordinal, 
process_logic_fingerprint, process_ordinal, target_keys FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let precommit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n\n Ok(precommit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn precommit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n max_process_ordinal: i64,\n staging_target_keys: TrackedTargetKeyForSource,\n memoization_info: Option<&StoredMemoizationInfo>,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} (source_id, source_key, max_process_ordinal, staging_target_keys, memoization_info) VALUES ($1, $2, $3, $4, $5)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET max_process_ordinal = $3, staging_target_keys = $4, memoization_info = $5 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(max_process_ordinal) // $3\n .bind(sqlx::types::Json(staging_target_keys)) // $4\n .bind(memoization_info.map(sqlx::types::Json)) // $5\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceTrackingInfoForCommit {\n pub staging_target_keys: sqlx::types::Json,\n pub process_ordinal: Option,\n}\n\npub async fn read_source_tracking_info_for_commit(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result> {\n let query_str = format!(\n \"SELECT staging_target_keys, process_ordinal FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let 
commit_tracking_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(db_executor)\n .await?;\n Ok(commit_tracking_info)\n}\n\n#[allow(clippy::too_many_arguments)]\npub async fn commit_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n staging_target_keys: TrackedTargetKeyForSource,\n processed_source_ordinal: Option,\n logic_fingerprint: &[u8],\n process_ordinal: i64,\n process_time_micros: i64,\n target_keys: TrackedTargetKeyForSource,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n action: WriteAction,\n) -> Result<()> {\n let query_str = match action {\n WriteAction::Insert => format!(\n \"INSERT INTO {} ( \\\n source_id, source_key, \\\n max_process_ordinal, staging_target_keys, \\\n processed_source_ordinal, process_logic_fingerprint, process_ordinal, process_time_micros, target_keys) \\\n VALUES ($1, $2, $6 + 1, $3, $4, $5, $6, $7, $8)\",\n db_setup.table_name\n ),\n WriteAction::Update => format!(\n \"UPDATE {} SET staging_target_keys = $3, processed_source_ordinal = $4, process_logic_fingerprint = $5, process_ordinal = $6, process_time_micros = $7, target_keys = $8 WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n ),\n };\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(sqlx::types::Json(staging_target_keys)) // $3\n .bind(processed_source_ordinal) // $4\n .bind(logic_fingerprint) // $5\n .bind(process_ordinal) // $6\n .bind(process_time_micros) // $7\n .bind(sqlx::types::Json(target_keys)) // $8\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\npub async fn delete_source_tracking_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"DELETE FROM {} WHERE source_id = $1 AND source_key = $2\",\n 
db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .execute(db_executor)\n .await?;\n Ok(())\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct TrackedSourceKeyMetadata {\n pub source_key: serde_json::Value,\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n}\n\npub struct ListTrackedSourceKeyMetadataState {\n query_str: String,\n}\n\nimpl ListTrackedSourceKeyMetadataState {\n pub fn new() -> Self {\n Self {\n query_str: String::new(),\n }\n }\n\n pub fn list<'a>(\n &'a mut self,\n source_id: i32,\n db_setup: &'a TrackingTableSetupState,\n pool: &'a PgPool,\n ) -> impl Stream> + 'a {\n self.query_str = format!(\n \"SELECT source_key, processed_source_ordinal, process_logic_fingerprint FROM {} WHERE source_id = $1\",\n db_setup.table_name\n );\n sqlx::query_as(&self.query_str).bind(source_id).fetch(pool)\n }\n}\n\n#[derive(sqlx::FromRow, Debug)]\npub struct SourceLastProcessedInfo {\n pub processed_source_ordinal: Option,\n pub process_logic_fingerprint: Option>,\n pub process_time_micros: Option,\n}\n\npub async fn read_source_last_processed_info(\n source_id: i32,\n source_key_json: &serde_json::Value,\n db_setup: &TrackingTableSetupState,\n pool: &PgPool,\n) -> Result> {\n let query_str = format!(\n \"SELECT processed_source_ordinal, process_logic_fingerprint, process_time_micros FROM {} WHERE source_id = $1 AND source_key = $2\",\n db_setup.table_name\n );\n let last_processed_info = sqlx::query_as(&query_str)\n .bind(source_id)\n .bind(source_key_json)\n .fetch_optional(pool)\n .await?;\n Ok(last_processed_info)\n}\n\npub async fn update_source_tracking_ordinal(\n source_id: i32,\n source_key_json: &serde_json::Value,\n processed_source_ordinal: Option,\n db_setup: &TrackingTableSetupState,\n db_executor: impl sqlx::Executor<'_, Database = sqlx::Postgres>,\n) -> Result<()> {\n let query_str = format!(\n \"UPDATE {} SET processed_source_ordinal = $3 WHERE source_id = $1 AND source_key 
= $2\",\n db_setup.table_name\n );\n sqlx::query(&query_str)\n .bind(source_id) // $1\n .bind(source_key_json) // $2\n .bind(processed_source_ordinal) // $3\n .execute(db_executor)\n .await?;\n Ok(())\n}\n"], ["/cocoindex/src/llm/anthropic.rs", "use crate::prelude::*;\nuse base64::prelude::*;\n\nuse crate::llm::{\n LlmGenerateRequest, LlmGenerateResponse, LlmGenerationClient, OutputFormat,\n ToJsonSchemaOptions, detect_image_mime_type,\n};\nuse anyhow::Context;\nuse urlencoding::encode;\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Anthropic doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"ANTHROPIC_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"ANTHROPIC_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result {\n let mut user_content_parts: Vec = Vec::new();\n\n // Add image part if present\n if let Some(image_bytes) = &request.image {\n let base64_image = BASE64_STANDARD.encode(image_bytes.as_ref());\n let mime_type = detect_image_mime_type(image_bytes.as_ref())?;\n user_content_parts.push(serde_json::json!({\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": mime_type,\n \"data\": base64_image,\n }\n }));\n }\n\n // Add text part\n user_content_parts.push(serde_json::json!({\n \"type\": \"text\",\n \"text\": request.user_prompt\n }));\n\n let messages = vec![serde_json::json!({\n \"role\": \"user\",\n \"content\": user_content_parts\n })];\n\n let mut payload = serde_json::json!({\n \"model\": request.model,\n \"messages\": messages,\n \"max_tokens\": 4096\n });\n\n // Add system prompt as top-level field if present (required)\n if let Some(system) = 
request.system_prompt {\n payload[\"system\"] = serde_json::json!(system);\n }\n\n // Extract schema from output_format, error if not JsonSchema\n let schema = match request.output_format.as_ref() {\n Some(OutputFormat::JsonSchema { schema, .. }) => schema,\n _ => api_bail!(\"Anthropic client expects OutputFormat::JsonSchema for all requests\"),\n };\n\n let schema_json = serde_json::to_value(schema)?;\n payload[\"tools\"] = serde_json::json!([\n { \"type\": \"custom\", \"name\": \"report_result\", \"input_schema\": schema_json }\n ]);\n\n let url = \"https://api.anthropic.com/v1/messages\";\n\n let encoded_api_key = encode(&self.api_key);\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"x-api-key\", encoded_api_key.as_ref())\n .header(\"anthropic-version\", \"2023-06-01\")\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Anthropic API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let mut resp_json: serde_json::Value = resp.json().await.context(\"Invalid JSON\")?;\n if let Some(error) = resp_json.get(\"error\") {\n bail!(\"Anthropic API error: {:?}\", error);\n }\n\n // Debug print full response\n // println!(\"Anthropic API full response: {resp_json:?}\");\n\n let resp_content = &resp_json[\"content\"];\n let tool_name = \"report_result\";\n let mut extracted_json: Option = None;\n if let Some(array) = resp_content.as_array() {\n for item in array {\n if item.get(\"type\") == Some(&serde_json::Value::String(\"tool_use\".to_string()))\n && item.get(\"name\") == Some(&serde_json::Value::String(tool_name.to_string()))\n {\n if let Some(input) = item.get(\"input\") {\n extracted_json = Some(input.clone());\n break;\n }\n }\n }\n }\n let text = if let Some(json) = extracted_json {\n // Try strict JSON serialization first\n serde_json::to_string(&json)?\n } else {\n // Fallback: try text if no tool output found\n match &mut 
resp_json[\"content\"][0][\"text\"] {\n serde_json::Value::String(s) => {\n // Try strict JSON parsing first\n match serde_json::from_str::(s) {\n Ok(_) => std::mem::take(s),\n Err(e) => {\n // Try permissive json5 parsing as fallback\n match json5::from_str::(s) {\n Ok(value) => {\n println!(\"[Anthropic] Used permissive JSON5 parser for output\");\n serde_json::to_string(&value)?\n }\n Err(e2) => {\n return Err(anyhow::anyhow!(format!(\n \"No structured tool output or text found in response, and permissive JSON5 parsing also failed: {e}; {e2}\"\n )));\n }\n }\n }\n }\n }\n _ => {\n return Err(anyhow::anyhow!(\n \"No structured tool output or text found in response\"\n ));\n }\n }\n };\n\n Ok(LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions {\n ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n"], ["/cocoindex/src/llm/openai.rs", "use crate::api_bail;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient, detect_image_mime_type};\nuse anyhow::Result;\nuse async_openai::{\n Client as OpenAIClient,\n config::OpenAIConfig,\n types::{\n ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartImage,\n ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage,\n ChatCompletionRequestSystemMessageContent, ChatCompletionRequestUserMessage,\n ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,\n CreateChatCompletionRequest, CreateEmbeddingRequest, EmbeddingInput, ImageDetail,\n ResponseFormat, ResponseFormatJsonSchema,\n },\n};\nuse async_trait::async_trait;\nuse base64::prelude::*;\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! 
{\n \"text-embedding-3-small\" => 1536,\n \"text-embedding-3-large\" => 3072,\n \"text-embedding-ada-002\" => 1536,\n};\n\npub struct Client {\n client: async_openai::Client,\n}\n\nimpl Client {\n pub(crate) fn from_parts(client: async_openai::Client) -> Self {\n Self { client }\n }\n\n pub fn new(address: Option) -> Result {\n if let Some(address) = address {\n api_bail!(\"OpenAI doesn't support custom API address: {address}\");\n }\n // Verify API key is set\n if std::env::var(\"OPENAI_API_KEY\").is_err() {\n api_bail!(\"OPENAI_API_KEY environment variable must be set\");\n }\n Ok(Self {\n // OpenAI client will use OPENAI_API_KEY env variable by default\n client: OpenAIClient::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let mut messages = Vec::new();\n\n // Add system prompt if provided\n if let Some(system) = request.system_prompt {\n messages.push(ChatCompletionRequestMessage::System(\n ChatCompletionRequestSystemMessage {\n content: ChatCompletionRequestSystemMessageContent::Text(system.into_owned()),\n ..Default::default()\n },\n ));\n }\n\n // Add user message\n let user_message_content = match request.image {\n Some(img_bytes) => {\n let base64_image = BASE64_STANDARD.encode(img_bytes.as_ref());\n let mime_type = detect_image_mime_type(img_bytes.as_ref())?;\n let image_url = format!(\"data:{mime_type};base64,{base64_image}\");\n ChatCompletionRequestUserMessageContent::Array(vec![\n ChatCompletionRequestUserMessageContentPart::Text(\n ChatCompletionRequestMessageContentPartText {\n text: request.user_prompt.into_owned(),\n },\n ),\n ChatCompletionRequestUserMessageContentPart::ImageUrl(\n ChatCompletionRequestMessageContentPartImage {\n image_url: async_openai::types::ImageUrl {\n url: image_url,\n detail: Some(ImageDetail::Auto),\n },\n },\n ),\n ])\n }\n None => 
ChatCompletionRequestUserMessageContent::Text(request.user_prompt.into_owned()),\n };\n messages.push(ChatCompletionRequestMessage::User(\n ChatCompletionRequestUserMessage {\n content: user_message_content,\n ..Default::default()\n },\n ));\n\n // Create the chat completion request\n let request = CreateChatCompletionRequest {\n model: request.model.to_string(),\n messages,\n response_format: match request.output_format {\n Some(super::OutputFormat::JsonSchema { name, schema }) => {\n Some(ResponseFormat::JsonSchema {\n json_schema: ResponseFormatJsonSchema {\n name: name.into_owned(),\n description: None,\n schema: Some(serde_json::to_value(&schema)?),\n strict: Some(true),\n },\n })\n }\n None => None,\n },\n ..Default::default()\n };\n\n // Send request and get response\n let response = self.client.chat().create(request).await?;\n\n // Extract the response text from the first choice\n let text = response\n .choices\n .into_iter()\n .next()\n .and_then(|choice| choice.message.content)\n .ok_or_else(|| anyhow::anyhow!(\"No response from OpenAI\"))?;\n\n Ok(super::LlmGenerateResponse { text })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: true,\n supports_format: false,\n extract_descriptions: false,\n top_level_must_be_object: true,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let response = self\n .client\n .embeddings()\n .create(CreateEmbeddingRequest {\n model: request.model.to_string(),\n input: EmbeddingInput::String(request.text.to_string()),\n dimensions: request.output_dimension,\n ..Default::default()\n })\n .await?;\n Ok(super::LlmEmbeddingResponse {\n embedding: response\n .data\n .into_iter()\n .next()\n .ok_or_else(|| anyhow::anyhow!(\"No embedding returned from OpenAI\"))?\n .embedding,\n })\n }\n\n fn get_default_embedding_dimension(&self, model: 
&str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/llm/ollama.rs", "use crate::prelude::*;\n\nuse super::{LlmEmbeddingClient, LlmGenerationClient};\nuse schemars::schema::SchemaObject;\nuse serde_with::{base64::Base64, serde_as};\n\nfn get_embedding_dimension(model: &str) -> Option {\n match model.to_ascii_lowercase().as_str() {\n \"mxbai-embed-large\"\n | \"bge-m3\"\n | \"bge-large\"\n | \"snowflake-arctic-embed\"\n | \"snowflake-arctic-embed2\" => Some(1024),\n\n \"nomic-embed-text\"\n | \"paraphrase-multilingual\"\n | \"snowflake-arctic-embed:110m\"\n | \"snowflake-arctic-embed:137m\"\n | \"granite-embedding:278m\" => Some(768),\n\n \"all-minilm\"\n | \"snowflake-arctic-embed:22m\"\n | \"snowflake-arctic-embed:33m\"\n | \"granite-embedding\" => Some(384),\n\n _ => None,\n }\n}\n\npub struct Client {\n generate_url: String,\n embed_url: String,\n reqwest_client: reqwest::Client,\n}\n\n#[derive(Debug, Serialize)]\nenum OllamaFormat<'a> {\n #[serde(untagged)]\n JsonSchema(&'a SchemaObject),\n}\n\n#[serde_as]\n#[derive(Debug, Serialize)]\nstruct OllamaRequest<'a> {\n pub model: &'a str,\n pub prompt: &'a str,\n #[serde_as(as = \"Option>\")]\n pub images: Option>,\n pub format: Option>,\n pub system: Option<&'a str>,\n pub stream: Option,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaResponse {\n pub response: String,\n}\n\n#[derive(Debug, Serialize)]\nstruct OllamaEmbeddingRequest<'a> {\n pub model: &'a str,\n pub input: &'a str,\n}\n\n#[derive(Debug, Deserialize)]\nstruct OllamaEmbeddingResponse {\n pub embedding: Vec,\n}\n\nconst OLLAMA_DEFAULT_ADDRESS: &str = \"http://localhost:11434\";\n\nimpl Client {\n pub async fn new(address: Option) -> Result {\n let address = match &address {\n Some(addr) => addr.trim_end_matches('/'),\n None => OLLAMA_DEFAULT_ADDRESS,\n };\n Ok(Self {\n generate_url: format!(\"{address}/api/generate\"),\n embed_url: format!(\"{address}/api/embed\"),\n reqwest_client: 
reqwest::Client::new(),\n })\n }\n}\n\n#[async_trait]\nimpl LlmGenerationClient for Client {\n async fn generate<'req>(\n &self,\n request: super::LlmGenerateRequest<'req>,\n ) -> Result {\n let req = OllamaRequest {\n model: request.model,\n prompt: request.user_prompt.as_ref(),\n images: request.image.as_deref().map(|img| vec![img]),\n format: request.output_format.as_ref().map(\n |super::OutputFormat::JsonSchema { schema, .. }| {\n OllamaFormat::JsonSchema(schema.as_ref())\n },\n ),\n system: request.system_prompt.as_ref().map(|s| s.as_ref()),\n stream: Some(false),\n };\n let res = retryable::run(\n || {\n self.reqwest_client\n .post(self.generate_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !res.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n res.status(),\n res.text().await?\n );\n }\n let json: OllamaResponse = res.json().await?;\n Ok(super::LlmGenerateResponse {\n text: json.response,\n })\n }\n\n fn json_schema_options(&self) -> super::ToJsonSchemaOptions {\n super::ToJsonSchemaOptions {\n fields_always_required: false,\n supports_format: true,\n extract_descriptions: true,\n top_level_must_be_object: false,\n }\n }\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: super::LlmEmbeddingRequest<'req>,\n ) -> Result {\n let req = OllamaEmbeddingRequest {\n model: request.model,\n input: request.text.as_ref(),\n };\n let resp = retryable::run(\n || {\n self.reqwest_client\n .post(self.embed_url.as_str())\n .json(&req)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n if !resp.status().is_success() {\n bail!(\n \"Ollama API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n let embedding_resp: OllamaEmbeddingResponse = resp.json().await.context(\"Invalid JSON\")?;\n Ok(super::LlmEmbeddingResponse {\n embedding: embedding_resp.embedding,\n })\n }\n\n fn 
get_default_embedding_dimension(&self, model: &str) -> Option {\n get_embedding_dimension(model)\n }\n}\n"], ["/cocoindex/src/ops/functions/embed_text.rs", "use crate::{\n llm::{\n LlmApiConfig, LlmApiType, LlmEmbeddingClient, LlmEmbeddingRequest, new_llm_embedding_client,\n },\n ops::sdk::*,\n};\n\n#[derive(Deserialize)]\nstruct Spec {\n api_type: LlmApiType,\n model: String,\n address: Option,\n api_config: Option,\n output_dimension: Option,\n task_type: Option,\n}\n\nstruct Args {\n client: Box,\n text: ResolvedOpArg,\n}\n\nstruct Executor {\n spec: Spec,\n args: Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n fn behavior_version(&self) -> Option {\n Some(1)\n }\n\n fn enable_cache(&self) -> bool {\n true\n }\n\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let req = LlmEmbeddingRequest {\n model: &self.spec.model,\n text: Cow::Borrowed(text),\n output_dimension: self.spec.output_dimension,\n task_type: self\n .spec\n .task_type\n .as_ref()\n .map(|s| Cow::Borrowed(s.as_str())),\n };\n let embedding = self.args.client.embed_text(req).await?;\n Ok(embedding.embedding.into())\n }\n}\n\nstruct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = Spec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"EmbedText\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n spec: &'a Spec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Self::ResolvedArgs, EnrichedValueType)> {\n let text = args_resolver.next_arg(\"text\")?;\n let client =\n new_llm_embedding_client(spec.api_type, spec.address.clone(), spec.api_config.clone())\n .await?;\n let output_dimension = match spec.output_dimension {\n Some(output_dimension) => output_dimension,\n None => {\n client.get_default_embedding_dimension(spec.model.as_str())\n .ok_or_else(|| api_error!(\"model \\\"{}\\\" is unknown for {:?}, needs to specify 
`output_dimension` explicitly\", spec.model, spec.api_type))?\n }\n };\n let output_schema = make_output_type(BasicValueType::Vector(VectorTypeSchema {\n dimension: Some(output_dimension as usize),\n element_type: Box::new(BasicValueType::Float32),\n }));\n Ok((Args { client, text }, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n spec: Spec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { spec, args }))\n }\n}\n\npub fn register(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n Factory.register(registry)\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n\n #[tokio::test]\n #[ignore = \"This test requires OpenAI API key or a configured local LLM and may make network calls.\"]\n async fn test_embed_text() {\n let spec = Spec {\n api_type: LlmApiType::OpenAi,\n model: \"text-embedding-ada-002\".to_string(),\n address: None,\n api_config: None,\n output_dimension: None,\n task_type: None,\n };\n\n let factory = Arc::new(Factory);\n let text_content = \"CocoIndex is a performant data transformation framework for AI.\";\n\n let input_args_values = vec![text_content.to_string().into()];\n\n let input_arg_schemas = vec![build_arg_schema(\"text\", BasicValueType::Str)];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n if result.is_err() {\n eprintln!(\n \"test_embed_text: test_flow_function returned error (potentially expected for evaluate): {:?}\",\n result.as_ref().err()\n );\n }\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed. NOTE: This test may require network access/API keys for OpenAI. 
Error: {:?}\",\n result.err()\n );\n\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Vector(arc_vec)) => {\n assert_eq!(arc_vec.len(), 1536, \"Embedding vector dimension mismatch\");\n for item in arc_vec.iter() {\n match item {\n BasicValue::Float32(_) => {}\n _ => panic!(\"Embedding vector element is not Float32: {item:?}\"),\n }\n }\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Vector), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/utils/retryable.rs", "use log::trace;\nuse std::{future::Future, time::Duration};\n\npub trait IsRetryable {\n fn is_retryable(&self) -> bool;\n}\n\npub struct Error {\n error: anyhow::Error,\n is_retryable: bool,\n}\n\nimpl std::fmt::Display for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.error, f)\n }\n}\n\nimpl std::fmt::Debug for Error {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Debug::fmt(&self.error, f)\n }\n}\n\nimpl IsRetryable for Error {\n fn is_retryable(&self) -> bool {\n self.is_retryable\n }\n}\n\nimpl IsRetryable for reqwest::Error {\n fn is_retryable(&self) -> bool {\n self.status() == Some(reqwest::StatusCode::TOO_MANY_REQUESTS)\n }\n}\n\nimpl Error {\n pub fn always_retryable(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: true,\n }\n }\n}\n\nimpl From for Error {\n fn from(error: anyhow::Error) -> Self {\n Self {\n error,\n is_retryable: false,\n }\n }\n}\n\nimpl From for anyhow::Error {\n fn from(val: Error) -> Self {\n val.error\n }\n}\n\nimpl From for Error {\n fn from(error: E) -> Self {\n Self {\n is_retryable: error.is_retryable(),\n error: anyhow::Error::new(error),\n }\n }\n}\n\npub type Result = std::result::Result;\n\n#[allow(non_snake_case)]\npub fn Ok(value: T) -> Result {\n Result::Ok(value)\n}\n\npub struct RetryOptions {\n pub max_retries: Option,\n pub initial_backoff: Duration,\n pub max_backoff: Duration,\n}\n\nimpl Default for 
RetryOptions {\n fn default() -> Self {\n Self {\n max_retries: Some(10),\n initial_backoff: Duration::from_millis(100),\n max_backoff: Duration::from_secs(10),\n }\n }\n}\n\npub static HEAVY_LOADED_OPTIONS: RetryOptions = RetryOptions {\n max_retries: Some(10),\n initial_backoff: Duration::from_secs(1),\n max_backoff: Duration::from_secs(60),\n};\n\npub async fn run<\n Ok,\n Err: std::fmt::Display + IsRetryable,\n Fut: Future>,\n F: Fn() -> Fut,\n>(\n f: F,\n options: &RetryOptions,\n) -> Result {\n let mut retries = 0;\n let mut backoff = options.initial_backoff;\n\n loop {\n match f().await {\n Result::Ok(result) => return Result::Ok(result),\n Result::Err(err) => {\n if !err.is_retryable()\n || options\n .max_retries\n .is_some_and(|max_retries| retries >= max_retries)\n {\n return Result::Err(err);\n }\n retries += 1;\n trace!(\n \"Will retry #{} in {}ms for error: {}\",\n retries,\n backoff.as_millis(),\n err\n );\n tokio::time::sleep(backoff).await;\n if backoff < options.max_backoff {\n backoff = std::cmp::min(\n Duration::from_micros(\n (backoff.as_micros() * rand::random_range(1618..=2000) / 1000) as u64,\n ),\n options.max_backoff,\n );\n }\n }\n }\n }\n}\n"], ["/cocoindex/src/utils/fingerprint.rs", "use anyhow::bail;\nuse base64::prelude::*;\nuse blake2::digest::typenum;\nuse blake2::{Blake2b, Digest};\nuse serde::Deserialize;\nuse serde::ser::{\n Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,\n SerializeTupleStruct, SerializeTupleVariant, Serializer,\n};\n\n#[derive(Debug)]\npub struct FingerprinterError {\n msg: String,\n}\n\nimpl std::fmt::Display for FingerprinterError {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"FingerprinterError: {}\", self.msg)\n }\n}\nimpl std::error::Error for FingerprinterError {}\nimpl serde::ser::Error for FingerprinterError {\n fn custom(msg: T) -> Self\n where\n T: std::fmt::Display,\n {\n FingerprinterError {\n msg: 
format!(\"{msg}\"),\n }\n }\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]\npub struct Fingerprint(pub [u8; 16]);\n\nimpl Fingerprint {\n pub fn to_base64(self) -> String {\n BASE64_STANDARD.encode(self.0)\n }\n\n pub fn from_base64(s: &str) -> anyhow::Result {\n let bytes = match s.len() {\n 24 => BASE64_STANDARD.decode(s)?,\n\n // For backward compatibility. Some old version (<= v0.1.2) is using hex encoding.\n 32 => hex::decode(s)?,\n _ => bail!(\"Encoded fingerprint length is unexpected: {}\", s.len()),\n };\n match bytes.try_into() {\n Ok(bytes) => Ok(Fingerprint(bytes)),\n Err(e) => bail!(\"Fingerprint bytes length is unexpected: {}\", e.len()),\n }\n }\n}\n\nimpl Serialize for Fingerprint {\n fn serialize(&self, serializer: S) -> Result\n where\n S: serde::Serializer,\n {\n serializer.serialize_str(&self.to_base64())\n }\n}\n\nimpl<'de> Deserialize<'de> for Fingerprint {\n fn deserialize(deserializer: D) -> Result\n where\n D: serde::Deserializer<'de>,\n {\n let s = String::deserialize(deserializer)?;\n Self::from_base64(&s).map_err(serde::de::Error::custom)\n }\n}\n#[derive(Clone, Default)]\npub struct Fingerprinter {\n hasher: Blake2b,\n}\n\nimpl Fingerprinter {\n pub fn into_fingerprint(self) -> Fingerprint {\n Fingerprint(self.hasher.finalize().into())\n }\n\n pub fn with(self, value: &S) -> Result {\n let mut fingerprinter = self;\n value.serialize(&mut fingerprinter)?;\n Ok(fingerprinter)\n }\n\n pub fn write(&mut self, value: &S) -> Result<(), FingerprinterError> {\n value.serialize(self)\n }\n\n fn write_type_tag(&mut self, tag: &str) {\n self.hasher.update(tag.as_bytes());\n self.hasher.update(b\";\");\n }\n\n fn write_end_tag(&mut self) {\n self.hasher.update(b\".\");\n }\n\n fn write_varlen_bytes(&mut self, bytes: &[u8]) {\n self.write_usize(bytes.len());\n self.hasher.update(bytes);\n }\n\n fn write_usize(&mut self, value: usize) {\n self.hasher.update((value as u32).to_le_bytes());\n }\n}\n\nimpl Serializer for &mut Fingerprinter {\n 
type Ok = ();\n type Error = FingerprinterError;\n\n type SerializeSeq = Self;\n type SerializeTuple = Self;\n type SerializeTupleStruct = Self;\n type SerializeTupleVariant = Self;\n type SerializeMap = Self;\n type SerializeStruct = Self;\n type SerializeStructVariant = Self;\n\n fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {\n self.write_type_tag(if v { \"t\" } else { \"f\" });\n Ok(())\n }\n\n fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {\n self.write_type_tag(\"i1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {\n self.write_type_tag(\"i2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {\n self.write_type_tag(\"i4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {\n self.write_type_tag(\"i8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {\n self.write_type_tag(\"u1\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {\n self.write_type_tag(\"u2\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {\n self.write_type_tag(\"u4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {\n self.write_type_tag(\"u8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {\n self.write_type_tag(\"f4\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {\n self.write_type_tag(\"f8\");\n self.hasher.update(v.to_le_bytes());\n Ok(())\n }\n\n fn serialize_char(self, v: char) -> Result<(), Self::Error> {\n self.write_type_tag(\"c\");\n self.write_usize(v as 
usize);\n Ok(())\n }\n\n fn serialize_str(self, v: &str) -> Result<(), Self::Error> {\n self.write_type_tag(\"s\");\n self.write_varlen_bytes(v.as_bytes());\n Ok(())\n }\n\n fn serialize_bytes(self, v: &[u8]) -> Result<(), Self::Error> {\n self.write_type_tag(\"b\");\n self.write_varlen_bytes(v);\n Ok(())\n }\n\n fn serialize_none(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"\");\n Ok(())\n }\n\n fn serialize_some(self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(self)\n }\n\n fn serialize_unit(self) -> Result<(), Self::Error> {\n self.write_type_tag(\"()\");\n Ok(())\n }\n\n fn serialize_unit_struct(self, name: &'static str) -> Result<(), Self::Error> {\n self.write_type_tag(\"US\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(())\n }\n\n fn serialize_unit_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n ) -> Result<(), Self::Error> {\n self.write_type_tag(\"UV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(())\n }\n\n fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NS\");\n self.write_varlen_bytes(name.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_newtype_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n value: &T,\n ) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.write_type_tag(\"NV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n value.serialize(self)\n }\n\n fn serialize_seq(self, _len: Option) -> Result {\n self.write_type_tag(\"L\");\n Ok(self)\n }\n\n fn serialize_tuple(self, _len: usize) -> Result {\n self.write_type_tag(\"T\");\n Ok(self)\n }\n\n fn serialize_tuple_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TS\");\n 
self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_tuple_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"TV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n\n fn serialize_map(self, _len: Option) -> Result {\n self.write_type_tag(\"M\");\n Ok(self)\n }\n\n fn serialize_struct(\n self,\n name: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"S\");\n self.write_varlen_bytes(name.as_bytes());\n Ok(self)\n }\n\n fn serialize_struct_variant(\n self,\n name: &'static str,\n _variant_index: u32,\n variant: &'static str,\n _len: usize,\n ) -> Result {\n self.write_type_tag(\"SV\");\n self.write_varlen_bytes(name.as_bytes());\n self.write_varlen_bytes(variant.as_bytes());\n Ok(self)\n }\n}\n\nimpl SerializeSeq for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTuple for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeTupleVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn 
serialize_field(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeMap for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n key.serialize(&mut **self)\n }\n\n fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStruct for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n\nimpl SerializeStructVariant for &mut Fingerprinter {\n type Ok = ();\n type Error = FingerprinterError;\n\n fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error>\n where\n T: ?Sized + Serialize,\n {\n self.hasher.update(key.as_bytes());\n self.hasher.update(b\"\\n\");\n value.serialize(&mut **self)\n }\n\n fn end(self) -> Result<(), Self::Error> {\n self.write_end_tag();\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/sdk.rs", "pub(crate) use crate::prelude::*;\n\nuse crate::builder::plan::AnalyzedFieldReference;\nuse crate::builder::plan::AnalyzedLocalFieldReference;\nuse std::collections::BTreeMap;\n\npub use super::factory_bases::*;\npub use super::interface::*;\npub use crate::base::schema::*;\npub use crate::base::spec::*;\npub use crate::base::value::*;\n\n// Disambiguate the ExportTargetBuildOutput type.\npub use 
super::factory_bases::TypedExportDataCollectionBuildOutput;\npub use super::registry::ExecutorFactoryRegistry;\n/// Defined for all types convertible to ValueType, to ease creation for ValueType in various operation factories.\npub trait TypeCore {\n fn into_type(self) -> ValueType;\n}\n\nimpl TypeCore for BasicValueType {\n fn into_type(self) -> ValueType {\n ValueType::Basic(self)\n }\n}\n\nimpl TypeCore for StructSchema {\n fn into_type(self) -> ValueType {\n ValueType::Struct(self)\n }\n}\n\nimpl TypeCore for TableSchema {\n fn into_type(self) -> ValueType {\n ValueType::Table(self)\n }\n}\n\npub fn make_output_type(value_type: Type) -> EnrichedValueType {\n EnrichedValueType {\n typ: value_type.into_type(),\n attrs: Default::default(),\n nullable: false,\n }\n}\n\n#[derive(Debug, Deserialize)]\npub struct EmptySpec {}\n\n#[macro_export]\nmacro_rules! fields_value {\n ($($field:expr), +) => {\n $crate::base::value::FieldValues { fields: std::vec![ $(($field).into()),+ ] }\n };\n}\n\npub struct SchemaBuilderFieldRef(AnalyzedLocalFieldReference);\n\nimpl SchemaBuilderFieldRef {\n pub fn to_field_ref(&self) -> AnalyzedFieldReference {\n AnalyzedFieldReference {\n local: self.0.clone(),\n scope_up_level: 0,\n }\n }\n}\npub struct StructSchemaBuilder<'a> {\n base_fields_idx: Vec,\n target: &'a mut StructSchema,\n}\n\nimpl<'a> StructSchemaBuilder<'a> {\n pub fn new(target: &'a mut StructSchema) -> Self {\n Self {\n base_fields_idx: Vec::new(),\n target,\n }\n }\n\n pub fn _set_description(&mut self, description: impl Into>) {\n self.target.description = Some(description.into());\n }\n\n pub fn add_field(&mut self, field_schema: FieldSchema) -> SchemaBuilderFieldRef {\n let current_idx = self.target.fields.len() as u32;\n Arc::make_mut(&mut self.target.fields).push(field_schema);\n let mut fields_idx = self.base_fields_idx.clone();\n fields_idx.push(current_idx);\n SchemaBuilderFieldRef(AnalyzedLocalFieldReference { fields_idx })\n }\n\n pub fn _add_struct_field(\n 
&mut self,\n name: impl Into,\n nullable: bool,\n attrs: Arc>,\n ) -> (StructSchemaBuilder<'_>, SchemaBuilderFieldRef) {\n let field_schema = FieldSchema::new(\n name.into(),\n EnrichedValueType {\n typ: ValueType::Struct(StructSchema {\n fields: Arc::new(Vec::new()),\n description: None,\n }),\n nullable,\n attrs,\n },\n );\n let local_ref = self.add_field(field_schema);\n let struct_schema = match &mut Arc::make_mut(&mut self.target.fields)\n .last_mut()\n .unwrap()\n .value_type\n .typ\n {\n ValueType::Struct(s) => s,\n _ => unreachable!(),\n };\n (\n StructSchemaBuilder {\n base_fields_idx: local_ref.0.fields_idx.clone(),\n target: struct_schema,\n },\n local_ref,\n )\n }\n}\n"], ["/cocoindex/src/base/duration.rs", "use std::f64;\n\nuse anyhow::{Result, anyhow, bail};\nuse chrono::Duration;\n\n/// Parses a string of number-unit pairs into a vector of (number, unit),\n/// ensuring units are among the allowed ones.\nfn parse_components(\n s: &str,\n allowed_units: &[char],\n original_input: &str,\n) -> Result> {\n let mut result = Vec::new();\n let mut iter = s.chars().peekable();\n while iter.peek().is_some() {\n let mut num_str = String::new();\n let mut has_decimal = false;\n\n // Parse digits and optional decimal point\n while let Some(&c) = iter.peek() {\n if c.is_ascii_digit() || (c == '.' && !has_decimal) {\n if c == '.' 
{\n has_decimal = true;\n }\n num_str.push(iter.next().unwrap());\n } else {\n break;\n }\n }\n if num_str.is_empty() {\n bail!(\"Expected number in: {}\", original_input);\n }\n let num = num_str\n .parse::()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", num_str, original_input))?;\n if let Some(&unit) = iter.peek() {\n if allowed_units.contains(&unit) {\n result.push((num, unit));\n iter.next();\n } else {\n bail!(\"Invalid unit '{}' in: {}\", unit, original_input);\n }\n } else {\n bail!(\n \"Missing unit after number '{}' in: {}\",\n num_str,\n original_input\n );\n }\n }\n Ok(result)\n}\n\n/// Parses an ISO 8601 duration string into a `chrono::Duration`.\nfn parse_iso8601_duration(s: &str, original_input: &str) -> Result {\n let (is_negative, s_after_sign) = if let Some(stripped) = s.strip_prefix('-') {\n (true, stripped)\n } else {\n (false, s)\n };\n\n if !s_after_sign.starts_with('P') {\n bail!(\"Duration must start with 'P' in: {}\", original_input);\n }\n let s_after_p = &s_after_sign[1..];\n\n let (date_part, time_part) = if let Some(pos) = s_after_p.find('T') {\n (&s_after_p[..pos], Some(&s_after_p[pos + 1..]))\n } else {\n (s_after_p, None)\n };\n\n // Date components (Y, M, W, D)\n let date_components = parse_components(date_part, &['Y', 'M', 'W', 'D'], original_input)?;\n\n // Time components (H, M, S)\n let time_components = if let Some(time_str) = time_part {\n let comps = parse_components(time_str, &['H', 'M', 'S'], original_input)?;\n if comps.is_empty() {\n bail!(\n \"Time part present but no time components in: {}\",\n original_input\n );\n }\n comps\n } else {\n vec![]\n };\n\n if date_components.is_empty() && time_components.is_empty() {\n bail!(\"No components in duration: {}\", original_input);\n }\n\n // Accumulate date duration\n let date_duration = date_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| {\n let days = match unit {\n 'Y' => num * 365.0,\n 'M' => num * 30.0,\n 'W' => num * 7.0,\n 'D' => num,\n _ => 
unreachable!(\"Invalid date unit should be caught by prior validation\"),\n };\n let microseconds = (days * 86_400_000_000.0) as i64;\n acc + Duration::microseconds(microseconds)\n });\n\n // Accumulate time duration\n let time_duration =\n time_components\n .iter()\n .fold(Duration::zero(), |acc, &(num, unit)| match unit {\n 'H' => {\n let nanoseconds = (num * 3_600_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'M' => {\n let nanoseconds = (num * 60_000_000_000.0).round() as i64;\n acc + Duration::nanoseconds(nanoseconds)\n }\n 'S' => {\n let nanoseconds = (num.fract() * 1_000_000_000.0).round() as i64;\n acc + Duration::seconds(num as i64) + Duration::nanoseconds(nanoseconds)\n }\n _ => unreachable!(\"Invalid time unit should be caught by prior validation\"),\n });\n\n let mut total = date_duration + time_duration;\n if is_negative {\n total = -total;\n }\n\n Ok(total)\n}\n\n/// Parses a human-readable duration string into a `chrono::Duration`.\nfn parse_human_readable_duration(s: &str, original_input: &str) -> Result {\n let parts: Vec<&str> = s.split_whitespace().collect();\n if parts.is_empty() || parts.len() % 2 != 0 {\n bail!(\n \"Invalid human-readable duration format in: {}\",\n original_input\n );\n }\n\n let durations: Result> = parts\n .chunks(2)\n .map(|chunk| {\n let num: i64 = chunk[0]\n .parse()\n .map_err(|_| anyhow!(\"Invalid number '{}' in: {}\", chunk[0], original_input))?;\n\n match chunk[1].to_lowercase().as_str() {\n \"day\" | \"days\" => Ok(Duration::days(num)),\n \"hour\" | \"hours\" => Ok(Duration::hours(num)),\n \"minute\" | \"minutes\" => Ok(Duration::minutes(num)),\n \"second\" | \"seconds\" => Ok(Duration::seconds(num)),\n \"millisecond\" | \"milliseconds\" => Ok(Duration::milliseconds(num)),\n \"microsecond\" | \"microseconds\" => Ok(Duration::microseconds(num)),\n _ => bail!(\"Invalid unit '{}' in: {}\", chunk[1], original_input),\n }\n })\n .collect();\n\n durations.map(|durs| 
durs.into_iter().sum())\n}\n\n/// Parses a duration string into a `chrono::Duration`, trying ISO 8601 first, then human-readable format.\npub fn parse_duration(s: &str) -> Result {\n let original_input = s;\n let s = s.trim();\n if s.is_empty() {\n bail!(\"Empty duration string\");\n }\n\n let is_likely_iso8601 = match s.as_bytes() {\n [c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n [b'-', c, ..] if c.eq_ignore_ascii_case(&b'P') => true,\n _ => false,\n };\n\n if is_likely_iso8601 {\n parse_iso8601_duration(s, original_input)\n } else {\n parse_human_readable_duration(s, original_input)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn check_ok(res: Result, expected: Duration, input_str: &str) {\n match res {\n Ok(duration) => assert_eq!(duration, expected, \"Input: '{input_str}'\"),\n Err(e) => panic!(\"Input: '{input_str}', expected Ok({expected:?}), but got Err: {e}\"),\n }\n }\n\n fn check_err_contains(res: Result, expected_substring: &str, input_str: &str) {\n match res {\n Ok(d) => panic!(\n \"Input: '{input_str}', expected error containing '{expected_substring}', but got Ok({d:?})\"\n ),\n Err(e) => {\n let err_msg = e.to_string();\n assert!(\n err_msg.contains(expected_substring),\n \"Input: '{input_str}', error message '{err_msg}' does not contain expected substring '{expected_substring}'\"\n );\n }\n }\n }\n\n #[test]\n fn test_empty_string() {\n check_err_contains(parse_duration(\"\"), \"Empty duration string\", \"\\\"\\\"\");\n }\n\n #[test]\n fn test_whitespace_string() {\n check_err_contains(parse_duration(\" \"), \"Empty duration string\", \"\\\" \\\"\");\n }\n\n #[test]\n fn test_iso_just_p() {\n check_err_contains(parse_duration(\"P\"), \"No components in duration: P\", \"\\\"P\\\"\");\n }\n\n #[test]\n fn test_iso_pt() {\n check_err_contains(\n parse_duration(\"PT\"),\n \"Time part present but no time components in: PT\",\n \"\\\"PT\\\"\",\n );\n }\n\n #[test]\n fn test_iso_missing_number_before_unit_in_date_part() {\n 
check_err_contains(parse_duration(\"PD\"), \"Expected number in: PD\", \"\\\"PD\\\"\");\n }\n #[test]\n fn test_iso_missing_number_before_unit_in_time_part() {\n check_err_contains(parse_duration(\"PTM\"), \"Expected number in: PTM\", \"\\\"PTM\\\"\");\n }\n\n #[test]\n fn test_iso_time_unit_without_t() {\n check_err_contains(parse_duration(\"P1H\"), \"Invalid unit 'H' in: P1H\", \"\\\"P1H\\\"\");\n check_err_contains(parse_duration(\"P1S\"), \"Invalid unit 'S' in: P1S\", \"\\\"P1S\\\"\");\n }\n\n #[test]\n fn test_iso_invalid_unit() {\n check_err_contains(parse_duration(\"P1X\"), \"Invalid unit 'X' in: P1X\", \"\\\"P1X\\\"\");\n check_err_contains(\n parse_duration(\"PT1X\"),\n \"Invalid unit 'X' in: PT1X\",\n \"\\\"PT1X\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_lowercase_unit_is_not_allowed() {\n check_err_contains(\n parse_duration(\"p1h\"),\n \"Duration must start with 'P' in: p1h\",\n \"\\\"p1h\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1h\"),\n \"Invalid unit 'h' in: PT1h\",\n \"\\\"PT1h\\\"\",\n );\n }\n\n #[test]\n fn test_iso_trailing_number_error() {\n check_err_contains(\n parse_duration(\"P1D2\"),\n \"Missing unit after number '2' in: P1D2\",\n \"\\\"P1D2\\\"\",\n );\n }\n\n #[test]\n fn test_iso_invalid_fractional_format() {\n check_err_contains(\n parse_duration(\"PT1..5S\"),\n \"Invalid unit '.' in: PT1..5S\",\n \"\\\"PT1..5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"PT1.5.5S\"),\n \"Invalid unit '.' in: PT1.5.5S\",\n \"\\\"PT1.5.5S\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1..5D\"),\n \"Invalid unit '.' 
in: P1..5D\",\n \"\\\"P1..5D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_misplaced_t() {\n check_err_contains(\n parse_duration(\"P1DT2H T3M\"),\n \"Expected number in: P1DT2H T3M\",\n \"\\\"P1DT2H T3M\\\"\",\n );\n check_err_contains(\n parse_duration(\"P1T2H\"),\n \"Missing unit after number '1' in: P1T2H\",\n \"\\\"P1T2H\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_number_after_p() {\n check_err_contains(\n parse_duration(\"P-1D\"),\n \"Expected number in: P-1D\",\n \"\\\"P-1D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_valid_months() {\n check_ok(parse_duration(\"P1M\"), Duration::days(30), \"\\\"P1M\\\"\");\n check_ok(parse_duration(\" P13M\"), Duration::days(13 * 30), \"\\\"P13M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_weeks() {\n check_ok(parse_duration(\"P1W\"), Duration::days(7), \"\\\"P1W\\\"\");\n check_ok(parse_duration(\" P1W \"), Duration::days(7), \"\\\"P1W\\\"\");\n }\n\n #[test]\n fn test_iso_valid_days() {\n check_ok(parse_duration(\"P1D\"), Duration::days(1), \"\\\"P1D\\\"\");\n }\n\n #[test]\n fn test_iso_valid_hours() {\n check_ok(parse_duration(\"PT2H\"), Duration::hours(2), \"\\\"PT2H\\\"\");\n }\n\n #[test]\n fn test_iso_valid_minutes() {\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT3M\\\"\");\n }\n\n #[test]\n fn test_iso_valid_seconds() {\n check_ok(parse_duration(\"PT4S\"), Duration::seconds(4), \"\\\"PT4S\\\"\");\n }\n\n #[test]\n fn test_iso_combined_units() {\n check_ok(\n parse_duration(\"P1Y2M3W4DT5H6M7S\"),\n Duration::days(365 + 60 + 3 * 7 + 4)\n + Duration::hours(5)\n + Duration::minutes(6)\n + Duration::seconds(7),\n \"\\\"P1Y2M3DT4H5M6S\\\"\",\n );\n check_ok(\n parse_duration(\"P1DT2H3M4S\"),\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4),\n \"\\\"P1DT2H3M4S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_duplicated_unit() {\n check_ok(parse_duration(\"P1D1D\"), Duration::days(2), \"\\\"P1D1D\\\"\");\n check_ok(parse_duration(\"PT1H1H\"), Duration::hours(2), 
\"\\\"PT1H1H\\\"\");\n }\n\n #[test]\n fn test_iso_out_of_order_unit() {\n check_ok(\n parse_duration(\"P1W1Y\"),\n Duration::days(365 + 7),\n \"\\\"P1W1Y\\\"\",\n );\n check_ok(\n parse_duration(\"PT2S1H\"),\n Duration::hours(1) + Duration::seconds(2),\n \"\\\"PT2S1H\\\"\",\n );\n check_ok(parse_duration(\"P3M\"), Duration::days(90), \"\\\"PT2S1H\\\"\");\n check_ok(parse_duration(\"PT3M\"), Duration::minutes(3), \"\\\"PT2S1H\\\"\");\n check_err_contains(\n parse_duration(\"P1H2D\"),\n \"Invalid unit 'H' in: P1H2D\", // Time part without 'T' is invalid\n \"\\\"P1H2D\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_duration_p1d() {\n check_ok(parse_duration(\"-P1D\"), -Duration::days(1), \"\\\"-P1D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pd0() {\n check_ok(parse_duration(\"P0D\"), Duration::zero(), \"\\\"P0D\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0s() {\n check_ok(parse_duration(\"PT0S\"), Duration::zero(), \"\\\"PT0S\\\"\");\n }\n\n #[test]\n fn test_iso_zero_duration_pt0h0m0s() {\n check_ok(parse_duration(\"PT0H0M0S\"), Duration::zero(), \"\\\"PT0H0M0S\\\"\");\n }\n\n #[test]\n fn test_iso_fractional_seconds() {\n check_ok(\n parse_duration(\"PT1.5S\"),\n Duration::seconds(1) + Duration::milliseconds(500),\n \"\\\"PT1.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT441010.456123S\"),\n Duration::seconds(441010) + Duration::microseconds(456123),\n \"\\\"PT441010.456123S\\\"\",\n );\n check_ok(\n parse_duration(\"PT0.000001S\"),\n Duration::microseconds(1),\n \"\\\"PT0.000001S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_fractional_date_units() {\n check_ok(\n parse_duration(\"P1.5D\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"P1.25Y\"),\n Duration::microseconds((1.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"P1.25Y\\\"\",\n );\n check_ok(\n parse_duration(\"P2.75M\"),\n Duration::microseconds((2.75 * 30.0 * 86_400_000_000.0) as i64),\n \"\\\"P2.75M\\\"\",\n );\n 
check_ok(\n parse_duration(\"P0.5W\"),\n Duration::microseconds((0.5 * 7.0 * 86_400_000_000.0) as i64),\n \"\\\"P0.5W\\\"\",\n );\n }\n\n #[test]\n fn test_iso_negative_fractional_date_units() {\n check_ok(\n parse_duration(\"-P1.5D\"),\n -Duration::microseconds((1.5 * 86_400_000_000.0) as i64),\n \"\\\"-P1.5D\\\"\",\n );\n check_ok(\n parse_duration(\"-P0.25Y\"),\n -Duration::microseconds((0.25 * 365.0 * 86_400_000_000.0) as i64),\n \"\\\"-P0.25Y\\\"\",\n );\n }\n\n #[test]\n fn test_iso_combined_fractional_units() {\n check_ok(\n parse_duration(\"P1.5DT2.5H3.5M4.5S\"),\n Duration::microseconds((1.5 * 86_400_000_000.0) as i64)\n + Duration::microseconds((2.5 * 3_600_000_000.0) as i64)\n + Duration::microseconds((3.5 * 60_000_000.0) as i64)\n + Duration::seconds(4)\n + Duration::milliseconds(500),\n \"\\\"1.5DT2.5H3.5M4.5S\\\"\",\n );\n }\n\n #[test]\n fn test_iso_multiple_fractional_time_units() {\n check_ok(\n parse_duration(\"PT1.5S2.5S\"),\n Duration::seconds(1 + 2) + Duration::milliseconds(500) + Duration::milliseconds(500),\n \"\\\"PT1.5S2.5S\\\"\",\n );\n check_ok(\n parse_duration(\"PT1.1H2.2M3.3S\"),\n Duration::hours(1)\n + Duration::seconds((0.1 * 3600.0) as i64)\n + Duration::minutes(2)\n + Duration::seconds((0.2 * 60.0) as i64)\n + Duration::seconds(3)\n + Duration::milliseconds(300),\n \"\\\"PT1.1H2.2M3.3S\\\"\",\n );\n }\n\n // Human-readable Tests\n #[test]\n fn test_human_missing_unit() {\n check_err_contains(\n parse_duration(\"1\"),\n \"Invalid human-readable duration format in: 1\",\n \"\\\"1\\\"\",\n );\n }\n\n #[test]\n fn test_human_missing_number() {\n check_err_contains(\n parse_duration(\"day\"),\n \"Invalid human-readable duration format in: day\",\n \"\\\"day\\\"\",\n );\n }\n\n #[test]\n fn test_human_incomplete_pair() {\n check_err_contains(\n parse_duration(\"1 day 2\"),\n \"Invalid human-readable duration format in: 1 day 2\",\n \"\\\"1 day 2\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_number_at_start() {\n 
check_err_contains(\n parse_duration(\"one day\"),\n \"Invalid number 'one' in: one day\",\n \"\\\"one day\\\"\",\n );\n }\n\n #[test]\n fn test_human_invalid_unit() {\n check_err_contains(\n parse_duration(\"1 hour 2 minutes 3 seconds four seconds\"),\n \"Invalid number 'four' in: 1 hour 2 minutes 3 seconds four seconds\",\n \"\\\"1 hour 2 minutes 3 seconds four seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_float_number_fail() {\n check_err_contains(\n parse_duration(\"1.5 hours\"),\n \"Invalid number '1.5' in: 1.5 hours\",\n \"\\\"1.5 hours\\\"\",\n );\n }\n\n #[test]\n fn test_invalid_human_readable_no_pairs() {\n check_err_contains(\n parse_duration(\"just some words\"),\n \"Invalid human-readable duration format in: just some words\",\n \"\\\"just some words\\\"\",\n );\n }\n\n #[test]\n fn test_human_unknown_unit() {\n check_err_contains(\n parse_duration(\"1 year\"),\n \"Invalid unit 'year' in: 1 year\",\n \"\\\"1 year\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_day() {\n check_ok(parse_duration(\"1 day\"), Duration::days(1), \"\\\"1 day\\\"\");\n }\n\n #[test]\n fn test_human_valid_days_uppercase() {\n check_ok(parse_duration(\"2 DAYS\"), Duration::days(2), \"\\\"2 DAYS\\\"\");\n }\n\n #[test]\n fn test_human_valid_hour() {\n check_ok(parse_duration(\"3 hour\"), Duration::hours(3), \"\\\"3 hour\\\"\");\n }\n\n #[test]\n fn test_human_valid_hours_mixedcase() {\n check_ok(parse_duration(\"4 HoUrS\"), Duration::hours(4), \"\\\"4 HoUrS\\\"\");\n }\n\n #[test]\n fn test_human_valid_minute() {\n check_ok(\n parse_duration(\"5 minute\"),\n Duration::minutes(5),\n \"\\\"5 minute\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_minutes() {\n check_ok(\n parse_duration(\"6 minutes\"),\n Duration::minutes(6),\n \"\\\"6 minutes\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_second() {\n check_ok(\n parse_duration(\"7 second\"),\n Duration::seconds(7),\n \"\\\"7 second\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_seconds() {\n check_ok(\n 
parse_duration(\"8 seconds\"),\n Duration::seconds(8),\n \"\\\"8 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_millisecond() {\n check_ok(\n parse_duration(\"9 millisecond\"),\n Duration::milliseconds(9),\n \"\\\"9 millisecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_milliseconds() {\n check_ok(\n parse_duration(\"10 milliseconds\"),\n Duration::milliseconds(10),\n \"\\\"10 milliseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microsecond() {\n check_ok(\n parse_duration(\"11 microsecond\"),\n Duration::microseconds(11),\n \"\\\"11 microsecond\\\"\",\n );\n }\n\n #[test]\n fn test_human_valid_microseconds() {\n check_ok(\n parse_duration(\"12 microseconds\"),\n Duration::microseconds(12),\n \"\\\"12 microseconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_combined() {\n let expected =\n Duration::days(1) + Duration::hours(2) + Duration::minutes(3) + Duration::seconds(4);\n check_ok(\n parse_duration(\"1 day 2 hours 3 minutes 4 seconds\"),\n expected,\n \"\\\"1 day 2 hours 3 minutes 4 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_out_of_order() {\n check_ok(\n parse_duration(\"1 second 2 hours\"),\n Duration::hours(2) + Duration::seconds(1),\n \"\\\"1 second 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"7 minutes 6 hours 5 days\"),\n Duration::days(5) + Duration::hours(6) + Duration::minutes(7),\n \"\\\"7 minutes 6 hours 5 days\\\"\",\n )\n }\n\n #[test]\n fn test_human_zero_duration_seconds() {\n check_ok(\n parse_duration(\"0 seconds\"),\n Duration::zero(),\n \"\\\"0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_days_hours() {\n check_ok(\n parse_duration(\"0 day 0 hour\"),\n Duration::zero(),\n \"\\\"0 day 0 hour\\\"\",\n );\n }\n\n #[test]\n fn test_human_zero_duration_multiple_zeros() {\n check_ok(\n parse_duration(\"0 days 0 hours 0 minutes 0 seconds\"),\n Duration::zero(),\n \"\\\"0 days 0 hours 0 minutes 0 seconds\\\"\",\n );\n }\n\n #[test]\n fn test_human_no_space_between_num_unit() {\n 
check_err_contains(\n parse_duration(\"1day\"),\n \"Invalid human-readable duration format in: 1day\",\n \"\\\"1day\\\"\",\n );\n }\n\n #[test]\n fn test_human_trimmed() {\n check_ok(parse_duration(\" 1 day \"), Duration::days(1), \"\\\" 1 day \\\"\");\n }\n\n #[test]\n fn test_human_extra_whitespace() {\n check_ok(\n parse_duration(\" 1 day 2 hours \"),\n Duration::days(1) + Duration::hours(2),\n \"\\\" 1 day 2 hours \\\"\",\n );\n }\n\n #[test]\n fn test_human_negative_numbers() {\n check_ok(\n parse_duration(\"-1 day 2 hours\"),\n Duration::days(-1) + Duration::hours(2),\n \"\\\"-1 day 2 hours\\\"\",\n );\n check_ok(\n parse_duration(\"1 day -2 hours\"),\n Duration::days(1) + Duration::hours(-2),\n \"\\\"1 day -2 hours\\\"\",\n );\n }\n}\n"], ["/cocoindex/src/execution/stats.rs", "use crate::prelude::*;\n\nuse std::{\n ops::AddAssign,\n sync::atomic::{AtomicI64, Ordering::Relaxed},\n};\n\n#[derive(Default, Serialize)]\npub struct Counter(pub AtomicI64);\n\nimpl Counter {\n pub fn inc(&self, by: i64) {\n self.0.fetch_add(by, Relaxed);\n }\n\n pub fn get(&self) -> i64 {\n self.0.load(Relaxed)\n }\n\n pub fn delta(&self, base: &Self) -> Counter {\n Counter(AtomicI64::new(self.get() - base.get()))\n }\n\n pub fn into_inner(self) -> i64 {\n self.0.into_inner()\n }\n\n pub fn merge(&self, delta: &Self) {\n self.0.fetch_add(delta.get(), Relaxed);\n }\n}\n\nimpl AddAssign for Counter {\n fn add_assign(&mut self, rhs: Self) {\n self.0.fetch_add(rhs.into_inner(), Relaxed);\n }\n}\n\nimpl Clone for Counter {\n fn clone(&self) -> Self {\n Self(AtomicI64::new(self.get()))\n }\n}\n\nimpl std::fmt::Display for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\nimpl std::fmt::Debug for Counter {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}\", self.get())\n }\n}\n\n#[derive(Debug, Serialize, Default, Clone)]\npub struct UpdateStats {\n pub num_no_change: Counter,\n 
pub num_insertions: Counter,\n pub num_deletions: Counter,\n /// Number of source rows that were updated.\n pub num_updates: Counter,\n /// Number of source rows that were reprocessed because of logic change.\n pub num_reprocesses: Counter,\n pub num_errors: Counter,\n}\n\nimpl UpdateStats {\n pub fn delta(&self, base: &Self) -> Self {\n UpdateStats {\n num_no_change: self.num_no_change.delta(&base.num_no_change),\n num_insertions: self.num_insertions.delta(&base.num_insertions),\n num_deletions: self.num_deletions.delta(&base.num_deletions),\n num_updates: self.num_updates.delta(&base.num_updates),\n num_reprocesses: self.num_reprocesses.delta(&base.num_reprocesses),\n num_errors: self.num_errors.delta(&base.num_errors),\n }\n }\n\n pub fn merge(&self, delta: &Self) {\n self.num_no_change.merge(&delta.num_no_change);\n self.num_insertions.merge(&delta.num_insertions);\n self.num_deletions.merge(&delta.num_deletions);\n self.num_updates.merge(&delta.num_updates);\n self.num_reprocesses.merge(&delta.num_reprocesses);\n self.num_errors.merge(&delta.num_errors);\n }\n\n pub fn has_any_change(&self) -> bool {\n self.num_insertions.get() > 0\n || self.num_deletions.get() > 0\n || self.num_updates.get() > 0\n || self.num_reprocesses.get() > 0\n || self.num_errors.get() > 0\n }\n}\n\nimpl std::fmt::Display for UpdateStats {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n let mut messages = Vec::new();\n let num_errors = self.num_errors.get();\n if num_errors > 0 {\n messages.push(format!(\"{num_errors} source rows FAILED\"));\n }\n\n let num_skipped = self.num_no_change.get();\n if num_skipped > 0 {\n messages.push(format!(\"{num_skipped} source rows NO CHANGE\"));\n }\n\n let num_insertions = self.num_insertions.get();\n let num_deletions = self.num_deletions.get();\n let num_updates = self.num_updates.get();\n let num_reprocesses = self.num_reprocesses.get();\n let num_source_rows = num_insertions + num_deletions + num_updates + 
num_reprocesses;\n if num_source_rows > 0 {\n messages.push(format!(\n \"{num_source_rows} source rows processed ({num_insertions} ADDED, {num_deletions} REMOVED, {num_updates} UPDATED, {num_reprocesses} REPROCESSED on flow change)\",\n ));\n }\n\n if !messages.is_empty() {\n write!(f, \"{}\", messages.join(\"; \"))?;\n } else {\n write!(f, \"No changes\")?;\n }\n\n Ok(())\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceUpdateInfo {\n pub source_name: String,\n pub stats: UpdateStats,\n}\n\nimpl std::fmt::Display for SourceUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n write!(f, \"{}: {}\", self.source_name, self.stats)\n }\n}\n\n#[derive(Debug, Serialize)]\npub struct IndexUpdateInfo {\n pub sources: Vec,\n}\n\nimpl std::fmt::Display for IndexUpdateInfo {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n for source in self.sources.iter() {\n writeln!(f, \"{source}\")?;\n }\n Ok(())\n }\n}\n"], ["/cocoindex/src/ops/functions/parse_json.rs", "use crate::ops::sdk::*;\nuse anyhow::Result;\nuse std::collections::HashMap;\nuse std::sync::{Arc, LazyLock};\nuse unicase::UniCase;\n\npub struct Args {\n text: ResolvedOpArg,\n language: Option,\n}\n\ntype ParseFn = fn(&str) -> Result;\nstruct LanguageConfig {\n parse_fn: ParseFn,\n}\n\nfn add_language(\n output: &mut HashMap, Arc>,\n name: &'static str,\n aliases: impl IntoIterator,\n parse_fn: ParseFn,\n) {\n let lang_config = Arc::new(LanguageConfig { parse_fn });\n for name in std::iter::once(name).chain(aliases.into_iter()) {\n if output.insert(name.into(), lang_config.clone()).is_some() {\n panic!(\"Language `{name}` already exists\");\n }\n }\n}\n\nfn parse_json(text: &str) -> Result {\n Ok(serde_json::from_str(text)?)\n}\n\nstatic PARSE_FN_BY_LANG: LazyLock, Arc>> =\n LazyLock::new(|| {\n let mut map = HashMap::new();\n add_language(&mut map, \"json\", [\".json\"], parse_json);\n map\n });\n\nstruct Executor {\n args: 
Args,\n}\n\n#[async_trait]\nimpl SimpleFunctionExecutor for Executor {\n async fn evaluate(&self, input: Vec) -> Result {\n let text = self.args.text.value(&input)?.as_str()?;\n let lang_config = {\n let language = self.args.language.value(&input)?;\n language\n .optional()\n .map(|v| anyhow::Ok(v.as_str()?.as_ref()))\n .transpose()?\n .and_then(|lang| PARSE_FN_BY_LANG.get(&UniCase::new(lang)))\n };\n let parse_fn = lang_config.map(|c| c.parse_fn).unwrap_or(parse_json);\n let parsed_value = parse_fn(text)?;\n Ok(value::Value::Basic(value::BasicValue::Json(Arc::new(\n parsed_value,\n ))))\n }\n}\n\npub struct Factory;\n\n#[async_trait]\nimpl SimpleFunctionFactoryBase for Factory {\n type Spec = EmptySpec;\n type ResolvedArgs = Args;\n\n fn name(&self) -> &str {\n \"ParseJson\"\n }\n\n async fn resolve_schema<'a>(\n &'a self,\n _spec: &'a EmptySpec,\n args_resolver: &mut OpArgsResolver<'a>,\n _context: &FlowInstanceContext,\n ) -> Result<(Args, EnrichedValueType)> {\n let args = Args {\n text: args_resolver\n .next_arg(\"text\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n language: args_resolver\n .next_optional_arg(\"language\")?\n .expect_type(&ValueType::Basic(BasicValueType::Str))?,\n };\n\n let output_schema = make_output_type(BasicValueType::Json);\n Ok((args, output_schema))\n }\n\n async fn build_executor(\n self: Arc,\n _spec: EmptySpec,\n args: Args,\n _context: Arc,\n ) -> Result> {\n Ok(Box::new(Executor { args }))\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::ops::functions::test_utils::{build_arg_schema, test_flow_function};\n use serde_json::json;\n\n #[tokio::test]\n async fn test_parse_json() {\n let spec = EmptySpec {};\n\n let factory = Arc::new(Factory);\n let json_string_content = r#\"{\"city\": \"Magdeburg\"}\"#;\n let lang_value: Value = \"json\".to_string().into();\n\n let input_args_values = vec![json_string_content.to_string().into(), lang_value.clone()];\n\n let input_arg_schemas = vec![\n 
build_arg_schema(\"text\", BasicValueType::Str),\n build_arg_schema(\"language\", BasicValueType::Str),\n ];\n\n let result = test_flow_function(factory, spec, input_arg_schemas, input_args_values).await;\n\n assert!(\n result.is_ok(),\n \"test_flow_function failed: {:?}\",\n result.err()\n );\n let value = result.unwrap();\n\n match value {\n Value::Basic(BasicValue::Json(arc_json_value)) => {\n let expected_json = json!({\"city\": \"Magdeburg\"});\n assert_eq!(\n *arc_json_value, expected_json,\n \"Parsed JSON value mismatch with specified language\"\n );\n }\n _ => panic!(\"Expected Value::Basic(BasicValue::Json), got {value:?}\"),\n }\n }\n}\n"], ["/cocoindex/src/ops/functions/test_utils.rs", "use crate::builder::plan::{\n AnalyzedFieldReference, AnalyzedLocalFieldReference, AnalyzedValueMapping,\n};\nuse crate::ops::sdk::{\n AuthRegistry, BasicValueType, EnrichedValueType, FlowInstanceContext, OpArgSchema,\n OpArgsResolver, SimpleFunctionExecutor, SimpleFunctionFactoryBase, Value, make_output_type,\n};\nuse anyhow::Result;\nuse serde::de::DeserializeOwned;\nuse std::sync::Arc;\n\n// This function builds an argument schema for a flow function.\npub fn build_arg_schema(\n name: &str,\n value_type: BasicValueType,\n) -> (Option<&str>, EnrichedValueType) {\n (Some(name), make_output_type(value_type))\n}\n\n// This function tests a flow function by providing a spec, input argument schemas, and values.\npub async fn test_flow_function(\n factory: Arc,\n spec: S,\n input_arg_schemas: Vec<(Option<&str>, EnrichedValueType)>,\n input_arg_values: Vec,\n) -> Result\nwhere\n S: DeserializeOwned + Send + Sync + 'static,\n R: Send + Sync + 'static,\n F: SimpleFunctionFactoryBase + ?Sized,\n{\n // 1. 
Construct OpArgSchema\n let op_arg_schemas: Vec = input_arg_schemas\n .into_iter()\n .enumerate()\n .map(|(idx, (name, value_type))| OpArgSchema {\n name: name.map_or(crate::base::spec::OpArgName(None), |n| {\n crate::base::spec::OpArgName(Some(n.to_string()))\n }),\n value_type,\n analyzed_value: AnalyzedValueMapping::Field(AnalyzedFieldReference {\n local: AnalyzedLocalFieldReference {\n fields_idx: vec![idx as u32],\n },\n scope_up_level: 0,\n }),\n })\n .collect();\n\n // 2. Resolve Schema & Args\n let mut args_resolver = OpArgsResolver::new(&op_arg_schemas)?;\n let context = Arc::new(FlowInstanceContext {\n flow_instance_name: \"test_flow_function\".to_string(),\n auth_registry: Arc::new(AuthRegistry::default()),\n py_exec_ctx: None,\n });\n\n let (resolved_args_from_schema, _output_schema): (R, EnrichedValueType) = factory\n .resolve_schema(&spec, &mut args_resolver, &context)\n .await?;\n\n args_resolver.done()?;\n\n // 3. Build Executor\n let executor: Box = factory\n .build_executor(spec, resolved_args_from_schema, Arc::clone(&context))\n .await?;\n\n // 4. 
Evaluate\n let result = executor.evaluate(input_arg_values).await?;\n\n Ok(result)\n}\n"], ["/cocoindex/src/server.rs", "use crate::prelude::*;\n\nuse crate::{lib_context::LibContext, service};\nuse axum::{Router, routing};\nuse tower::ServiceBuilder;\nuse tower_http::{\n cors::{AllowOrigin, CorsLayer},\n trace::TraceLayer,\n};\n\n#[derive(Deserialize, Debug)]\npub struct ServerSettings {\n pub address: String,\n #[serde(default)]\n pub cors_origins: Vec,\n}\n\n/// Initialize the server and return a future that will actually handle requests.\npub async fn init_server(\n lib_context: Arc,\n settings: ServerSettings,\n) -> Result> {\n let mut cors = CorsLayer::default();\n if !settings.cors_origins.is_empty() {\n let origins: Vec<_> = settings\n .cors_origins\n .iter()\n .map(|origin| origin.parse())\n .collect::>()?;\n cors = cors\n .allow_origin(AllowOrigin::list(origins))\n .allow_methods([\n axum::http::Method::GET,\n axum::http::Method::POST,\n axum::http::Method::DELETE,\n ])\n .allow_headers([axum::http::header::CONTENT_TYPE]);\n }\n let app = Router::new()\n .route(\n \"/cocoindex\",\n routing::get(|| async { \"CocoIndex is running!\" }),\n )\n .nest(\n \"/cocoindex/api\",\n Router::new()\n .route(\"/flows\", routing::get(service::flows::list_flows))\n .route(\n \"/flows/{flowInstName}\",\n routing::get(service::flows::get_flow),\n )\n .route(\n \"/flows/{flowInstName}/schema\",\n routing::get(service::flows::get_flow_schema),\n )\n .route(\n \"/flows/{flowInstName}/keys\",\n routing::get(service::flows::get_keys),\n )\n .route(\n \"/flows/{flowInstName}/data\",\n routing::get(service::flows::evaluate_data),\n )\n .route(\n \"/flows/{flowInstName}/rowStatus\",\n routing::get(service::flows::get_row_indexing_status),\n )\n .route(\n \"/flows/{flowInstName}/update\",\n routing::post(service::flows::update),\n )\n .layer(\n ServiceBuilder::new()\n .layer(TraceLayer::new_for_http())\n .layer(cors),\n )\n .with_state(lib_context.clone()),\n );\n\n let listener = 
tokio::net::TcpListener::bind(&settings.address)\n .await\n .context(format!(\"Failed to bind to address: {}\", settings.address))?;\n\n println!(\n \"Server running at http://{}/cocoindex\",\n listener.local_addr()?\n );\n let serve_fut = async { axum::serve(listener, app).await.unwrap() };\n Ok(serve_fut.boxed())\n}\n"], ["/cocoindex/src/utils/concur_control.rs", "use crate::prelude::*;\n\nuse tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore};\n\nstruct WeightedSemaphore {\n downscale_factor: u8,\n downscaled_quota: u32,\n sem: Arc,\n}\n\nimpl WeightedSemaphore {\n pub fn new(quota: usize) -> Self {\n let mut downscale_factor = 0;\n let mut downscaled_quota = quota;\n while downscaled_quota > u32::MAX as usize {\n downscaled_quota >>= 1;\n downscale_factor += 1;\n }\n let sem = Arc::new(Semaphore::new(downscaled_quota));\n Self {\n downscaled_quota: downscaled_quota as u32,\n downscale_factor,\n sem,\n }\n }\n\n async fn acquire_reservation(&self) -> Result {\n self.sem.clone().acquire_owned().await\n }\n\n async fn acquire(\n &self,\n weight: usize,\n reserved: bool,\n ) -> Result, AcquireError> {\n let downscaled_weight = (weight >> self.downscale_factor) as u32;\n let capped_weight = downscaled_weight.min(self.downscaled_quota);\n let reserved_weight = if reserved { 1 } else { 0 };\n if reserved_weight >= capped_weight {\n return Ok(None);\n }\n Ok(Some(\n self.sem\n .clone()\n .acquire_many_owned(capped_weight - reserved_weight)\n .await?,\n ))\n }\n}\n\npub struct Options {\n pub max_inflight_rows: Option,\n pub max_inflight_bytes: Option,\n}\n\npub struct ConcurrencyControllerPermit {\n _inflight_count_permit: Option,\n _inflight_bytes_permit: Option,\n}\n\npub struct ConcurrencyController {\n inflight_count_sem: Option>,\n inflight_bytes_sem: Option,\n}\n\npub static BYTES_UNKNOWN_YET: Option usize> = None;\n\nimpl ConcurrencyController {\n pub fn new(exec_options: &Options) -> Self {\n Self {\n inflight_count_sem: exec_options\n 
.max_inflight_rows\n .map(|max| Arc::new(Semaphore::new(max))),\n inflight_bytes_sem: exec_options.max_inflight_bytes.map(WeightedSemaphore::new),\n }\n }\n\n /// If `bytes_fn` is `None`, it means the number of bytes is not known yet.\n /// The controller will reserve a minimum number of bytes.\n /// The caller should call `acquire_bytes_with_reservation` with the actual number of bytes later.\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let inflight_count_permit = if let Some(sem) = &self.inflight_count_sem {\n Some(sem.clone().acquire_owned().await?)\n } else {\n None\n };\n let inflight_bytes_permit = if let Some(sem) = &self.inflight_bytes_sem {\n if let Some(bytes_fn) = bytes_fn {\n sem.acquire(bytes_fn(), false).await?\n } else {\n Some(sem.acquire_reservation().await?)\n }\n } else {\n None\n };\n Ok(ConcurrencyControllerPermit {\n _inflight_count_permit: inflight_count_permit,\n _inflight_bytes_permit: inflight_bytes_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result, AcquireError> {\n if let Some(sem) = &self.inflight_bytes_sem {\n sem.acquire(bytes_fn(), true).await\n } else {\n Ok(None)\n }\n }\n}\n\npub struct CombinedConcurrencyControllerPermit {\n _permit: ConcurrencyControllerPermit,\n _global_permit: ConcurrencyControllerPermit,\n}\n\npub struct CombinedConcurrencyController {\n controller: ConcurrencyController,\n global_controller: Arc,\n needs_num_bytes: bool,\n}\n\nimpl CombinedConcurrencyController {\n pub fn new(exec_options: &Options, global_controller: Arc) -> Self {\n Self {\n controller: ConcurrencyController::new(exec_options),\n needs_num_bytes: exec_options.max_inflight_bytes.is_some()\n || global_controller.inflight_bytes_sem.is_some(),\n global_controller,\n }\n }\n\n pub async fn acquire(\n &self,\n bytes_fn: Option usize>,\n ) -> Result {\n let num_bytes_fn = if let Some(bytes_fn) = bytes_fn\n && self.needs_num_bytes\n {\n let 
num_bytes = bytes_fn();\n Some(move || num_bytes)\n } else {\n None\n };\n\n let permit = self.controller.acquire(num_bytes_fn).await?;\n let global_permit = self.global_controller.acquire(num_bytes_fn).await?;\n Ok(CombinedConcurrencyControllerPermit {\n _permit: permit,\n _global_permit: global_permit,\n })\n }\n\n pub async fn acquire_bytes_with_reservation(\n &self,\n bytes_fn: impl FnOnce() -> usize,\n ) -> Result<(Option, Option), AcquireError> {\n let num_bytes = bytes_fn();\n let permit = self\n .controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n let global_permit = self\n .global_controller\n .acquire_bytes_with_reservation(move || num_bytes)\n .await?;\n Ok((permit, global_permit))\n }\n}\n"], ["/cocoindex/src/llm/voyage.rs", "use crate::prelude::*;\n\nuse crate::llm::{LlmEmbeddingClient, LlmEmbeddingRequest, LlmEmbeddingResponse};\nuse phf::phf_map;\n\nstatic DEFAULT_EMBEDDING_DIMENSIONS: phf::Map<&str, u32> = phf_map! {\n // Current models\n \"voyage-3-large\" => 1024,\n \"voyage-3.5\" => 1024,\n \"voyage-3.5-lite\" => 1024,\n \"voyage-code-3\" => 1024,\n \"voyage-finance-2\" => 1024,\n \"voyage-law-2\" => 1024,\n \"voyage-code-2\" => 1536,\n\n // Legacy models\n \"voyage-3\" => 1024,\n \"voyage-3-lite\" => 512,\n \"voyage-multilingual-2\" => 1024,\n \"voyage-large-2-instruct\" => 1024,\n \"voyage-large-2\" => 1536,\n \"voyage-2\" => 1024,\n \"voyage-lite-02-instruct\" => 1024,\n \"voyage-02\" => 1024,\n \"voyage-01\" => 1024,\n \"voyage-lite-01\" => 1024,\n \"voyage-lite-01-instruct\" => 1024,\n};\n\npub struct Client {\n api_key: String,\n client: reqwest::Client,\n}\n\nimpl Client {\n pub fn new(address: Option) -> Result {\n if address.is_some() {\n api_bail!(\"Voyage AI doesn't support custom API address\");\n }\n let api_key = match std::env::var(\"VOYAGE_API_KEY\") {\n Ok(val) => val,\n Err(_) => api_bail!(\"VOYAGE_API_KEY environment variable must be set\"),\n };\n Ok(Self {\n api_key,\n client: 
reqwest::Client::new(),\n })\n }\n}\n\n#[derive(Deserialize)]\nstruct EmbeddingData {\n embedding: Vec,\n}\n\n#[derive(Deserialize)]\nstruct EmbedResponse {\n data: Vec,\n}\n\n#[async_trait]\nimpl LlmEmbeddingClient for Client {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result {\n let url = \"https://api.voyageai.com/v1/embeddings\";\n\n let mut payload = serde_json::json!({\n \"input\": request.text,\n \"model\": request.model,\n });\n\n if let Some(task_type) = request.task_type {\n payload[\"input_type\"] = serde_json::Value::String(task_type.into());\n }\n\n let resp = retryable::run(\n || {\n self.client\n .post(url)\n .header(\"Authorization\", format!(\"Bearer {}\", self.api_key))\n .json(&payload)\n .send()\n },\n &retryable::HEAVY_LOADED_OPTIONS,\n )\n .await?;\n\n if !resp.status().is_success() {\n bail!(\n \"Voyage AI API error: {:?}\\n{}\\n\",\n resp.status(),\n resp.text().await?\n );\n }\n\n let embedding_resp: EmbedResponse = resp.json().await.context(\"Invalid JSON\")?;\n\n if embedding_resp.data.is_empty() {\n bail!(\"No embedding data in response\");\n }\n\n Ok(LlmEmbeddingResponse {\n embedding: embedding_resp.data[0].embedding.clone(),\n })\n }\n\n fn get_default_embedding_dimension(&self, model: &str) -> Option {\n DEFAULT_EMBEDDING_DIMENSIONS.get(model).copied()\n }\n}\n"], ["/cocoindex/src/service/error.rs", "use crate::prelude::*;\n\nuse axum::{\n Json,\n http::StatusCode,\n response::{IntoResponse, Response},\n};\nuse pyo3::{exceptions::PyException, prelude::*};\nuse std::{\n error::Error,\n fmt::{Debug, Display},\n};\n\n#[derive(Debug)]\npub struct ApiError {\n pub err: anyhow::Error,\n pub status_code: StatusCode,\n}\n\nimpl ApiError {\n pub fn new(message: &str, status_code: StatusCode) -> Self {\n Self {\n err: anyhow!(\"{}\", message),\n status_code,\n }\n }\n}\n\nimpl Display for ApiError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n 
}\n}\n\nimpl Error for ApiError {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.err.source()\n }\n}\n\n#[derive(Serialize)]\nstruct ErrorResponse {\n error: String,\n}\n\nimpl IntoResponse for ApiError {\n fn into_response(self) -> Response {\n debug!(\"Internal server error:\\n{:?}\", self.err);\n let error_response = ErrorResponse {\n error: self.err.to_string(),\n };\n (self.status_code, Json(error_response)).into_response()\n }\n}\n\nimpl From for ApiError {\n fn from(err: anyhow::Error) -> ApiError {\n if err.is::() {\n return err.downcast::().unwrap();\n }\n Self {\n err,\n status_code: StatusCode::INTERNAL_SERVER_ERROR,\n }\n }\n}\n\nimpl From for PyErr {\n fn from(val: ApiError) -> Self {\n PyException::new_err(val.err.to_string())\n }\n}\n\n#[derive(Clone)]\npub struct SharedError {\n pub err: Arc,\n}\n\nimpl SharedError {\n pub fn new(err: anyhow::Error) -> Self {\n Self { err: Arc::new(err) }\n }\n}\nimpl Debug for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.err, f)\n }\n}\n\nimpl Display for SharedError {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Display::fmt(&self.err, f)\n }\n}\n\nimpl From for SharedError {\n fn from(err: E) -> Self {\n Self {\n err: Arc::new(anyhow::Error::from(err)),\n }\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\nimpl AsRef for SharedError {\n fn as_ref(&self) -> &(dyn std::error::Error + Send + Sync + 'static) {\n self.err.as_ref().as_ref()\n }\n}\n\npub fn shared_ok(value: T) -> Result {\n Ok(value)\n}\n\npub type SharedResult = Result;\n\npub struct SharedErrorWrapper(SharedError);\n\nimpl Display for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n Debug::fmt(&self.0, f)\n }\n}\n\nimpl Debug for SharedErrorWrapper {\n fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n 
Debug::fmt(&self.0, f)\n }\n}\n\nimpl Error for SharedErrorWrapper {\n fn source(&self) -> Option<&(dyn Error + 'static)> {\n self.0.err.as_ref().source()\n }\n}\n\npub trait SharedResultExt {\n fn std_result(self) -> Result;\n}\n\nimpl SharedResultExt for Result {\n fn std_result(self) -> Result {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err)),\n }\n }\n}\n\npub trait SharedResultExtRef<'a, T> {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper>;\n}\n\nimpl<'a, T> SharedResultExtRef<'a, T> for &'a Result {\n fn std_result(self) -> Result<&'a T, SharedErrorWrapper> {\n match self {\n Ok(value) => Ok(value),\n Err(err) => Err(SharedErrorWrapper(err.clone())),\n }\n }\n}\n\npub fn invariance_violation() -> anyhow::Error {\n anyhow::anyhow!(\"Invariance violation\")\n}\n\n#[macro_export]\nmacro_rules! api_bail {\n ( $fmt:literal $(, $($arg:tt)*)?) => {\n return Err($crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST).into())\n };\n}\n\n#[macro_export]\nmacro_rules! api_error {\n ( $fmt:literal $(, $($arg:tt)*)?) 
=> {\n $crate::service::error::ApiError::new(&format!($fmt $(, $($arg)*)?), axum::http::StatusCode::BAD_REQUEST)\n };\n}\n"], ["/cocoindex/src/llm/mod.rs", "use crate::prelude::*;\n\nuse crate::base::json_schema::ToJsonSchemaOptions;\nuse infer::Infer;\nuse schemars::schema::SchemaObject;\nuse std::borrow::Cow;\n\nstatic INFER: LazyLock = LazyLock::new(Infer::new);\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub enum LlmApiType {\n Ollama,\n OpenAi,\n Gemini,\n Anthropic,\n LiteLlm,\n OpenRouter,\n Voyage,\n Vllm,\n VertexAi,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct VertexAiConfig {\n pub project: String,\n pub region: Option,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(tag = \"kind\")]\npub enum LlmApiConfig {\n VertexAi(VertexAiConfig),\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LlmSpec {\n pub api_type: LlmApiType,\n pub address: Option,\n pub model: String,\n pub api_config: Option,\n}\n\n#[derive(Debug)]\npub enum OutputFormat<'a> {\n JsonSchema {\n name: Cow<'a, str>,\n schema: Cow<'a, SchemaObject>,\n },\n}\n\n#[derive(Debug)]\npub struct LlmGenerateRequest<'a> {\n pub model: &'a str,\n pub system_prompt: Option>,\n pub user_prompt: Cow<'a, str>,\n pub image: Option>,\n pub output_format: Option>,\n}\n\n#[derive(Debug)]\npub struct LlmGenerateResponse {\n pub text: String,\n}\n\n#[async_trait]\npub trait LlmGenerationClient: Send + Sync {\n async fn generate<'req>(\n &self,\n request: LlmGenerateRequest<'req>,\n ) -> Result;\n\n fn json_schema_options(&self) -> ToJsonSchemaOptions;\n}\n\n#[derive(Debug)]\npub struct LlmEmbeddingRequest<'a> {\n pub model: &'a str,\n pub text: Cow<'a, str>,\n pub output_dimension: Option,\n pub task_type: Option>,\n}\n\npub struct LlmEmbeddingResponse {\n pub embedding: Vec,\n}\n\n#[async_trait]\npub trait LlmEmbeddingClient: Send + Sync {\n async fn embed_text<'req>(\n &self,\n request: LlmEmbeddingRequest<'req>,\n ) -> Result;\n\n fn 
get_default_embedding_dimension(&self, model: &str) -> Option;\n}\n\nmod anthropic;\nmod gemini;\nmod litellm;\nmod ollama;\nmod openai;\nmod openrouter;\nmod vllm;\nmod voyage;\n\npub async fn new_llm_generation_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::Anthropic => {\n Box::new(anthropic::Client::new(address).await?) as Box\n }\n LlmApiType::LiteLlm => {\n Box::new(litellm::Client::new_litellm(address).await?) as Box\n }\n LlmApiType::OpenRouter => Box::new(openrouter::Client::new_openrouter(address).await?)\n as Box,\n LlmApiType::Voyage => {\n api_bail!(\"Voyage is not supported for generation\")\n }\n LlmApiType::Vllm => {\n Box::new(vllm::Client::new_vllm(address).await?) as Box\n }\n };\n Ok(client)\n}\n\npub async fn new_llm_embedding_client(\n api_type: LlmApiType,\n address: Option,\n api_config: Option,\n) -> Result> {\n let client = match api_type {\n LlmApiType::Ollama => {\n Box::new(ollama::Client::new(address).await?) as Box\n }\n LlmApiType::Gemini => {\n Box::new(gemini::AiStudioClient::new(address)?) as Box\n }\n LlmApiType::OpenAi => {\n Box::new(openai::Client::new(address)?) as Box\n }\n LlmApiType::Voyage => {\n Box::new(voyage::Client::new(address)?) 
as Box\n }\n LlmApiType::VertexAi => Box::new(gemini::VertexAiClient::new(address, api_config).await?)\n as Box,\n LlmApiType::OpenRouter | LlmApiType::LiteLlm | LlmApiType::Vllm | LlmApiType::Anthropic => {\n api_bail!(\"Embedding is not supported for API type {:?}\", api_type)\n }\n };\n Ok(client)\n}\n\npub fn detect_image_mime_type(bytes: &[u8]) -> Result<&'static str> {\n let infer = &*INFER;\n match infer.get(bytes) {\n Some(info) if info.mime_type().starts_with(\"image/\") => Ok(info.mime_type()),\n _ => bail!(\"Unknown or unsupported image format\"),\n }\n}\n"], ["/cocoindex/src/utils/db.rs", "#[derive(Debug, Clone, PartialEq, Eq)]\npub struct ValidIdentifier(pub String);\n\nimpl TryFrom for ValidIdentifier {\n type Error = anyhow::Error;\n\n fn try_from(s: String) -> Result {\n if !s.is_empty() && s.chars().all(|c| c.is_alphanumeric() || c == '_') {\n Ok(ValidIdentifier(s))\n } else {\n Err(anyhow::anyhow!(\"Invalid identifier: {s:?}\"))\n }\n }\n}\n\nimpl std::fmt::Display for ValidIdentifier {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n std::fmt::Display::fmt(&self.0, f)\n }\n}\n\nimpl std::ops::Deref for ValidIdentifier {\n type Target = String;\n\n fn deref(&self) -> &Self::Target {\n &self.0\n }\n}\n\npub enum WriteAction {\n Insert,\n Update,\n}\n\npub fn sanitize_identifier(s: &str) -> String {\n let mut result = String::new();\n for c in s.chars() {\n if c.is_alphanumeric() || c == '_' {\n result.push(c);\n } else {\n result.push_str(\"__\");\n }\n }\n result\n}\n"], ["/cocoindex/src/utils/immutable.rs", "#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]\npub enum RefList<'a, T> {\n #[default]\n Nil,\n\n Cons(T, &'a RefList<'a, T>),\n}\n\nimpl<'a, T> RefList<'a, T> {\n pub fn prepend(&'a self, head: T) -> Self {\n Self::Cons(head, self)\n }\n\n pub fn iter(&'a self) -> impl Iterator {\n self\n }\n\n pub fn head(&'a self) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, _) => 
Some(head),\n }\n }\n\n pub fn headn(&'a self, n: usize) -> Option<&'a T> {\n match self {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n if n == 0 {\n Some(head)\n } else {\n tail.headn(n - 1)\n }\n }\n }\n }\n\n pub fn tail(&'a self) -> Option<&'a RefList<'a, T>> {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => Some(tail),\n }\n }\n\n pub fn tailn(&'a self, n: usize) -> Option<&'a RefList<'a, T>> {\n if n == 0 {\n Some(self)\n } else {\n match self {\n RefList::Nil => None,\n RefList::Cons(_, tail) => tail.tailn(n - 1),\n }\n }\n }\n}\n\nimpl<'a, T> Iterator for &'a RefList<'a, T> {\n type Item = &'a T;\n\n fn next(&mut self) -> Option {\n let current = *self;\n match current {\n RefList::Nil => None,\n RefList::Cons(head, tail) => {\n *self = *tail;\n Some(head)\n }\n }\n }\n}\n"], ["/cocoindex/src/setup/auth_registry.rs", "use std::collections::hash_map;\n\nuse crate::prelude::*;\n\npub struct AuthRegistry {\n entries: RwLock>,\n}\n\nimpl Default for AuthRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl AuthRegistry {\n pub fn new() -> Self {\n Self {\n entries: RwLock::new(HashMap::new()),\n }\n }\n\n pub fn add(&self, key: String, value: serde_json::Value) -> Result<()> {\n let mut entries = self.entries.write().unwrap();\n match entries.entry(key) {\n hash_map::Entry::Occupied(entry) => {\n api_bail!(\"Auth entry already exists: {}\", entry.key());\n }\n hash_map::Entry::Vacant(entry) => {\n entry.insert(value);\n }\n }\n Ok(())\n }\n\n pub fn get(&self, entry_ref: &spec::AuthEntryReference) -> Result {\n let entries = self.entries.read().unwrap();\n match entries.get(&entry_ref.key) {\n Some(value) => Ok(serde_json::from_value(value.clone())?),\n None => api_bail!(\n \"Auth entry `{key}` not found.\\n\\\n Hint: If you're not referencing `{key}` in your flow, it will likely be caused by a previously persisted target using it. 
\\\n You need to bring back the definition for the auth entry `{key}`, so that CocoIndex will be able to do a cleanup in the next `setup` run. \\\n See https://cocoindex.io/docs/core/flow_def#auth-registry for more details.\",\n key = entry_ref.key\n ),\n }\n }\n}\n"], ["/cocoindex/src/builder/analyzed_flow.rs", "use crate::{ops::interface::FlowInstanceContext, prelude::*};\n\nuse super::{analyzer, plan};\nuse crate::service::error::{SharedError, SharedResultExt, shared_ok};\n\npub struct AnalyzedFlow {\n pub flow_instance: spec::FlowInstanceSpec,\n pub data_schema: schema::FlowSchema,\n pub setup_state: exec_ctx::AnalyzedSetupState,\n\n pub flow_instance_ctx: Arc,\n\n /// It's None if the flow is not up to date\n pub execution_plan: Shared, SharedError>>>,\n}\n\nimpl AnalyzedFlow {\n pub async fn from_flow_instance(\n flow_instance: crate::base::spec::FlowInstanceSpec,\n flow_instance_ctx: Arc,\n ) -> Result {\n let (data_schema, setup_state, execution_plan_fut) =\n analyzer::analyze_flow(&flow_instance, flow_instance_ctx.clone()).await?;\n let execution_plan = async move {\n shared_ok(Arc::new(\n execution_plan_fut.await.map_err(SharedError::new)?,\n ))\n }\n .boxed()\n .shared();\n let result = Self {\n flow_instance,\n data_schema,\n setup_state,\n flow_instance_ctx,\n execution_plan,\n };\n Ok(result)\n }\n\n pub async fn get_execution_plan(&self) -> Result> {\n let execution_plan = self.execution_plan.clone().await.std_result()?;\n Ok(execution_plan)\n }\n}\n\npub struct AnalyzedTransientFlow {\n pub transient_flow_instance: spec::TransientFlowSpec,\n pub data_schema: schema::FlowSchema,\n pub execution_plan: plan::TransientExecutionPlan,\n pub output_type: schema::EnrichedValueType,\n}\n\nimpl AnalyzedTransientFlow {\n pub async fn from_transient_flow(\n transient_flow: spec::TransientFlowSpec,\n py_exec_ctx: Option,\n ) -> Result {\n let ctx = analyzer::build_flow_instance_context(&transient_flow.name, py_exec_ctx);\n let (output_type, data_schema, 
execution_plan_fut) =\n analyzer::analyze_transient_flow(&transient_flow, ctx).await?;\n Ok(Self {\n transient_flow_instance: transient_flow,\n data_schema,\n execution_plan: execution_plan_fut.await?,\n output_type,\n })\n }\n}\n"], ["/cocoindex/src/builder/plan.rs", "use crate::prelude::*;\n\nuse crate::ops::interface::*;\nuse crate::utils::fingerprint::{Fingerprint, Fingerprinter};\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalFieldReference {\n /// Must be non-empty.\n pub fields_idx: Vec,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedFieldReference {\n pub local: AnalyzedLocalFieldReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedLocalCollectorReference {\n pub collector_idx: u32,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Serialize)]\npub struct AnalyzedCollectorReference {\n pub local: AnalyzedLocalCollectorReference,\n /// How many levels up the scope the field is at.\n /// 0 means the current scope.\n #[serde(skip_serializing_if = \"u32_is_zero\")]\n pub scope_up_level: u32,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct AnalyzedStructMapping {\n pub fields: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\")]\npub enum AnalyzedValueMapping {\n Constant { value: value::Value },\n Field(AnalyzedFieldReference),\n Struct(AnalyzedStructMapping),\n}\n\n#[derive(Debug, Clone)]\npub struct AnalyzedOpOutput {\n pub field_idx: u32,\n}\n\npub struct AnalyzedImportOp {\n pub name: String,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n pub primary_key_type: schema::ValueType,\n pub refresh_options: spec::SourceRefreshOptions,\n\n pub concurrency_controller: concur_control::CombinedConcurrencyController,\n}\n\npub struct AnalyzedFunctionExecInfo {\n pub enable_cache: bool,\n pub 
behavior_version: Option,\n\n /// Fingerprinter of the function's behavior.\n pub fingerprinter: Fingerprinter,\n /// To deserialize cached value.\n pub output_type: schema::ValueType,\n}\n\npub struct AnalyzedTransformOp {\n pub name: String,\n pub inputs: Vec,\n pub function_exec_info: AnalyzedFunctionExecInfo,\n pub executor: Box,\n pub output: AnalyzedOpOutput,\n}\n\npub struct AnalyzedForEachOp {\n pub name: String,\n pub local_field_ref: AnalyzedLocalFieldReference,\n pub op_scope: AnalyzedOpScope,\n pub concurrency_controller: concur_control::ConcurrencyController,\n}\n\npub struct AnalyzedCollectOp {\n pub name: String,\n pub has_auto_uuid_field: bool,\n pub input: AnalyzedStructMapping,\n pub collector_ref: AnalyzedCollectorReference,\n /// Fingerprinter of the collector's schema. Used to decide when to reuse auto-generated UUIDs.\n pub fingerprinter: Fingerprinter,\n}\n\npub enum AnalyzedPrimaryKeyDef {\n Fields(Vec),\n}\n\npub struct AnalyzedExportOp {\n pub name: String,\n pub input: AnalyzedLocalCollectorReference,\n pub export_target_factory: Arc,\n pub export_context: Arc,\n pub primary_key_def: AnalyzedPrimaryKeyDef,\n pub primary_key_type: schema::ValueType,\n /// idx for value fields - excluding the primary key field.\n pub value_fields: Vec,\n /// If true, value is never changed on the same primary key.\n /// This is guaranteed if the primary key contains auto-generated UUIDs.\n pub value_stable: bool,\n}\n\npub struct AnalyzedExportTargetOpGroup {\n pub target_factory: Arc,\n pub op_idx: Vec,\n}\n\npub enum AnalyzedReactiveOp {\n Transform(AnalyzedTransformOp),\n ForEach(AnalyzedForEachOp),\n Collect(AnalyzedCollectOp),\n}\n\npub struct AnalyzedOpScope {\n pub reactive_ops: Vec,\n pub collector_len: usize,\n}\n\npub struct ExecutionPlan {\n pub logic_fingerprint: Fingerprint,\n\n pub import_ops: Vec,\n pub op_scope: AnalyzedOpScope,\n pub export_ops: Vec,\n pub export_op_groups: Vec,\n}\n\npub struct TransientExecutionPlan {\n pub input_fields: 
Vec,\n pub op_scope: AnalyzedOpScope,\n pub output_value: AnalyzedValueMapping,\n}\n\nfn u32_is_zero(v: &u32) -> bool {\n *v == 0\n}\n"], ["/cocoindex/src/execution/indexing_status.rs", "use crate::prelude::*;\n\nuse super::db_tracking;\nuse super::evaluator;\nuse futures::try_join;\n\n#[derive(Debug, Serialize)]\npub struct SourceRowLastProcessedInfo {\n pub source_ordinal: interface::Ordinal,\n pub processing_time: Option>,\n pub is_logic_current: bool,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowInfo {\n pub ordinal: interface::Ordinal,\n}\n\n#[derive(Debug, Serialize)]\npub struct SourceRowIndexingStatus {\n pub last_processed: Option,\n pub current: Option,\n}\n\npub async fn get_source_row_indexing_status(\n src_eval_ctx: &evaluator::SourceRowEvaluationContext<'_>,\n setup_execution_ctx: &exec_ctx::FlowSetupExecutionContext,\n pool: &sqlx::PgPool,\n) -> Result {\n let source_key_json = serde_json::to_value(src_eval_ctx.key)?;\n let last_processed_fut = db_tracking::read_source_last_processed_info(\n setup_execution_ctx.import_ops[src_eval_ctx.import_op_idx].source_id,\n &source_key_json,\n &setup_execution_ctx.setup_state.tracking_table,\n pool,\n );\n let current_fut = src_eval_ctx.import_op.executor.get_value(\n src_eval_ctx.key,\n &interface::SourceExecutorGetOptions {\n include_value: false,\n include_ordinal: true,\n },\n );\n let (last_processed, current) = try_join!(last_processed_fut, current_fut)?;\n\n let last_processed = last_processed.map(|l| SourceRowLastProcessedInfo {\n source_ordinal: interface::Ordinal(l.processed_source_ordinal),\n processing_time: l\n .process_time_micros\n .and_then(chrono::DateTime::::from_timestamp_micros),\n is_logic_current: Some(src_eval_ctx.plan.logic_fingerprint.0.as_slice())\n == l.process_logic_fingerprint.as_deref(),\n });\n let current = SourceRowInfo {\n ordinal: current\n .ordinal\n .ok_or(anyhow::anyhow!(\"Ordinal is unavailable for the source\"))?,\n };\n Ok(SourceRowIndexingStatus {\n 
last_processed,\n current: Some(current),\n })\n}\n"], ["/cocoindex/src/settings.rs", "use serde::Deserialize;\n\n#[derive(Deserialize, Debug)]\npub struct DatabaseConnectionSpec {\n pub url: String,\n pub user: Option,\n pub password: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct GlobalExecutionOptions {\n pub source_max_inflight_rows: Option,\n pub source_max_inflight_bytes: Option,\n}\n\n#[derive(Deserialize, Debug, Default)]\npub struct Settings {\n #[serde(default)]\n pub database: Option,\n #[serde(default)]\n #[allow(dead_code)] // Used via serialization/deserialization to Python\n pub app_namespace: String,\n #[serde(default)]\n pub global_execution_options: GlobalExecutionOptions,\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_settings_deserialize_with_database() {\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n },\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, Some(\"testuser\".to_string()));\n assert_eq!(db.password, Some(\"testpass\".to_string()));\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_without_database() {\n let json = r#\"{\n \"app_namespace\": \"test_app\"\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"test_app\");\n }\n\n #[test]\n fn test_settings_deserialize_empty_object() {\n let json = r#\"{}\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_none());\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_settings_deserialize_database_without_user_password() 
{\n let json = r#\"{\n \"database\": {\n \"url\": \"postgresql://localhost:5432/test\"\n }\n }\"#;\n\n let settings: Settings = serde_json::from_str(json).unwrap();\n\n assert!(settings.database.is_some());\n let db = settings.database.unwrap();\n assert_eq!(db.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db.user, None);\n assert_eq!(db.password, None);\n assert_eq!(settings.app_namespace, \"\");\n }\n\n #[test]\n fn test_database_connection_spec_deserialize() {\n let json = r#\"{\n \"url\": \"postgresql://localhost:5432/test\",\n \"user\": \"testuser\",\n \"password\": \"testpass\"\n }\"#;\n\n let db_spec: DatabaseConnectionSpec = serde_json::from_str(json).unwrap();\n\n assert_eq!(db_spec.url, \"postgresql://localhost:5432/test\");\n assert_eq!(db_spec.user, Some(\"testuser\".to_string()));\n assert_eq!(db_spec.password, Some(\"testpass\".to_string()));\n }\n}\n"], ["/cocoindex/src/ops/registry.rs", "use super::interface::ExecutorFactory;\nuse anyhow::Result;\nuse std::collections::HashMap;\n\npub struct ExecutorFactoryRegistry {\n factories: HashMap,\n}\n\nimpl Default for ExecutorFactoryRegistry {\n fn default() -> Self {\n Self::new()\n }\n}\n\nimpl ExecutorFactoryRegistry {\n pub fn new() -> Self {\n Self {\n factories: HashMap::new(),\n }\n }\n\n pub fn register(&mut self, name: String, factory: ExecutorFactory) -> Result<()> {\n match self.factories.entry(name) {\n std::collections::hash_map::Entry::Occupied(entry) => Err(anyhow::anyhow!(\n \"Factory with name already exists: {}\",\n entry.key()\n )),\n std::collections::hash_map::Entry::Vacant(entry) => {\n entry.insert(factory);\n Ok(())\n }\n }\n }\n\n pub fn get(&self, name: &str) -> Option<&ExecutorFactory> {\n self.factories.get(name)\n }\n}\n"], ["/cocoindex/src/ops/registration.rs", "use super::{\n factory_bases::*, functions, registry::ExecutorFactoryRegistry, sdk::ExecutorFactory, sources,\n targets,\n};\nuse anyhow::Result;\nuse std::sync::{LazyLock, RwLock};\n\nfn 
register_executor_factories(registry: &mut ExecutorFactoryRegistry) -> Result<()> {\n let reqwest_client = reqwest::Client::new();\n\n sources::local_file::Factory.register(registry)?;\n sources::google_drive::Factory.register(registry)?;\n sources::amazon_s3::Factory.register(registry)?;\n sources::azure_blob::Factory.register(registry)?;\n\n functions::parse_json::Factory.register(registry)?;\n functions::split_recursively::register(registry)?;\n functions::extract_by_llm::Factory.register(registry)?;\n functions::embed_text::register(registry)?;\n\n targets::postgres::Factory::default().register(registry)?;\n targets::qdrant::register(registry)?;\n targets::kuzu::register(registry, reqwest_client)?;\n\n targets::neo4j::Factory::new().register(registry)?;\n\n Ok(())\n}\n\nstatic EXECUTOR_FACTORY_REGISTRY: LazyLock> = LazyLock::new(|| {\n let mut registry = ExecutorFactoryRegistry::new();\n register_executor_factories(&mut registry).expect(\"Failed to register executor factories\");\n RwLock::new(registry)\n});\n\npub fn get_optional_executor_factory(kind: &str) -> Option {\n let registry = EXECUTOR_FACTORY_REGISTRY.read().unwrap();\n registry.get(kind).cloned()\n}\n\npub fn get_executor_factory(kind: &str) -> Result {\n get_optional_executor_factory(kind)\n .ok_or_else(|| anyhow::anyhow!(\"Executor factory not found for op kind: {}\", kind))\n}\n\npub fn register_factory(name: String, factory: ExecutorFactory) -> Result<()> {\n let mut registry = EXECUTOR_FACTORY_REGISTRY.write().unwrap();\n registry.register(name, factory)\n}\n"], ["/cocoindex/src/llm/litellm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_litellm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:4000\".to_string());\n let api_key = std::env::var(\"LITELLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if 
let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/vllm.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_vllm(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"http://127.0.0.1:8000/v1\".to_string());\n let api_key = std::env::var(\"VLLM_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/llm/openrouter.rs", "use async_openai::Client as OpenAIClient;\nuse async_openai::config::OpenAIConfig;\n\npub use super::openai::Client;\n\nimpl Client {\n pub async fn new_openrouter(address: Option) -> anyhow::Result {\n let address = address.unwrap_or_else(|| \"https://openrouter.ai/api/v1\".to_string());\n let api_key = std::env::var(\"OPENROUTER_API_KEY\").ok();\n let mut config = OpenAIConfig::new().with_api_base(address);\n if let Some(api_key) = api_key {\n config = config.with_api_key(api_key);\n }\n Ok(Client::from_parts(OpenAIClient::with_config(config)))\n }\n}\n"], ["/cocoindex/src/prelude.rs", "#![allow(unused_imports)]\n\npub(crate) use anyhow::{Context, Result};\npub(crate) use async_trait::async_trait;\npub(crate) use chrono::{DateTime, Utc};\npub(crate) use futures::{FutureExt, StreamExt};\npub(crate) use futures::{\n future::{BoxFuture, Shared},\n prelude::*,\n stream::BoxStream,\n};\npub(crate) use indexmap::{IndexMap, IndexSet};\npub(crate) use itertools::Itertools;\npub(crate) use serde::{Deserialize, Serialize, de::DeserializeOwned};\npub(crate) use std::any::Any;\npub(crate) use std::borrow::Cow;\npub(crate) use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};\npub(crate) use 
std::hash::Hash;\npub(crate) use std::sync::{Arc, LazyLock, Mutex, OnceLock, RwLock, Weak};\n\npub(crate) use crate::base::{self, schema, spec, value};\npub(crate) use crate::builder::{self, exec_ctx, plan};\npub(crate) use crate::execution;\npub(crate) use crate::lib_context::{FlowContext, LibContext, get_lib_context, get_runtime};\npub(crate) use crate::ops::interface;\npub(crate) use crate::service::error::{ApiError, invariance_violation};\npub(crate) use crate::setup;\npub(crate) use crate::setup::AuthRegistry;\npub(crate) use crate::utils::{self, concur_control, retryable};\npub(crate) use crate::{api_bail, api_error};\n\npub(crate) use anyhow::{anyhow, bail};\npub(crate) use async_stream::{stream, try_stream};\npub(crate) use log::{debug, error, info, trace, warn};\n\npub(crate) use derivative::Derivative;\n"], ["/cocoindex/src/ops/targets/shared/mod.rs", "pub mod property_graph;\npub mod table_columns;\n"], ["/cocoindex/src/base/field_attrs.rs", "use const_format::concatcp;\n\npub static COCOINDEX_PREFIX: &str = \"cocoindex.io/\";\n\n/// Present for bytes and str. It points to fields that represents the original file name for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_FILENAME: &str = concatcp!(COCOINDEX_PREFIX, \"content_filename\");\n\n/// Present for bytes and str. It points to fields that represents mime types for the data.\n/// Type: AnalyzedValueMapping\npub static CONTENT_MIME_TYPE: &str = concatcp!(COCOINDEX_PREFIX, \"content_mime_type\");\n\n/// Present for chunks. 
It points to fields that the chunks are for.\n/// Type: AnalyzedValueMapping\npub static CHUNK_BASE_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"chunk_base_text\");\n\n/// Base text for an embedding vector.\npub static _EMBEDDING_ORIGIN_TEXT: &str = concatcp!(COCOINDEX_PREFIX, \"embedding_origin_text\");\n"], ["/cocoindex/src/execution/mod.rs", "pub(crate) mod db_tracking_setup;\npub(crate) mod dumper;\npub(crate) mod evaluator;\npub(crate) mod indexing_status;\npub(crate) mod memoization;\npub(crate) mod row_indexer;\npub(crate) mod source_indexer;\npub(crate) mod stats;\n\nmod live_updater;\npub(crate) use live_updater::*;\n\nmod db_tracking;\n"], ["/cocoindex/src/ops/mod.rs", "pub mod interface;\npub mod registry;\n\n// All operations\nmod factory_bases;\nmod functions;\nmod sources;\nmod targets;\n\nmod registration;\npub(crate) use registration::*;\npub(crate) mod py_factory;\n\n// SDK is used for help registration for operations.\nmod sdk;\n"], ["/cocoindex/src/builder/mod.rs", "pub mod analyzer;\npub mod exec_ctx;\npub mod flow_builder;\npub mod plan;\n\nmod analyzed_flow;\n\npub use analyzed_flow::AnalyzedFlow;\npub use analyzed_flow::AnalyzedTransientFlow;\n"], ["/cocoindex/src/lib.rs", "mod base;\nmod builder;\nmod execution;\nmod lib_context;\nmod llm;\nmod ops;\nmod prelude;\nmod py;\nmod server;\nmod service;\nmod settings;\nmod setup;\nmod utils;\n"], ["/cocoindex/src/setup/mod.rs", "mod auth_registry;\nmod db_metadata;\nmod driver;\nmod states;\n\npub mod components;\n\npub use auth_registry::AuthRegistry;\npub use driver::*;\npub use states::*;\n"], ["/cocoindex/src/base/mod.rs", "pub mod duration;\npub mod field_attrs;\npub mod json_schema;\npub mod schema;\npub mod spec;\npub mod value;\n"], ["/cocoindex/src/ops/functions/mod.rs", "pub mod embed_text;\npub mod extract_by_llm;\npub mod parse_json;\npub mod split_recursively;\n\n#[cfg(test)]\nmod test_utils;\n"], ["/cocoindex/src/service/mod.rs", "pub(crate) mod error;\npub(crate) mod flows;\n"], 
["/cocoindex/src/utils/mod.rs", "pub mod concur_control;\npub mod db;\npub mod fingerprint;\npub mod immutable;\npub mod retryable;\npub mod yaml_ser;\n"], ["/cocoindex/src/ops/sources/mod.rs", "pub mod amazon_s3;\npub mod azure_blob;\npub mod google_drive;\npub mod local_file;\n"], ["/cocoindex/src/ops/targets/mod.rs", "mod shared;\n\npub mod kuzu;\npub mod neo4j;\npub mod postgres;\npub mod qdrant;\n"]], "task_instance_info": {"created_time": "2025-08-21 01:30:28", "created_task_model": "DeepSeek-R1", "function_skeleton": "pub fn from_states (\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self where\n for<'a> &'a S: Into>>,\n T: Clone, {}", "code_description": "This code segment functions to: Generate database table modification instructions by comparing desired and existing schema states, determining necessary structural changes including table recreation or column updates based on compatibility checks."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```rust\n[TASK_BEGIN]\npub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n) -> Self\nwhere\n for<'a> &'a S: Into>>,\n T: Clone,\n{\n let (drop_existing, table_upsertion) = match desired_state {\n None => (existing.initial().is_some(), None),\n Some(desired) => {\n let desired_schema: Cow<'_, TableColumnsSchema> = desired.into();\n let desired = desired_schema.into_owned();\n let existing_schema = existing.initial().map(|s| s.into().into_owned());\n\n if existing_invalidated {\n // Force re-creation if invalidated\n (\n existing_schema.is_some(),\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n )\n } else {\n match existing_schema {\n None => (\n false,\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n ),\n Some(existing_schema) => {\n if existing_schema.key_columns 
!= desired.key_columns {\n // Need to drop and recreate if keys changed\n (\n true,\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n )\n } else {\n // Calculate value column diffs\n let existing_values = &existing_schema.value_columns;\n let desired_values = &desired.value_columns;\n\n let columns_to_delete = existing_values\n .keys()\n .filter(|k| !desired_values.contains_key(*k))\n .cloned()\n .collect();\n\n let columns_to_upsert = desired_values\n .iter()\n .filter(|(k, v)| existing_values.get(*k) != Some(v))\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect();\n\n if columns_to_delete.is_empty() && columns_to_upsert.is_empty() {\n (false, None)\n } else {\n (\n false,\n Some(TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n }),\n )\n }\n }\n }\n }\n }\n }\n };\n\n Self {\n drop_existing,\n table_upsertion,\n }\n}\n[TASK_END]\n```", "inference_time": "2025-08-21 01-31-56"}, "editdistance_info": {"edit_distance": 22.1294, "calculate_time": "2025-08-21 01:31:56", "true_code_clean": "pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n ) -> Self\n where\n for<'a> &'a S: Into>>,\n T: Clone,\n {\n let existing_may_exists = existing.possible_versions().next().is_some();\n let possible_existing_cols: Vec>> = existing\n .possible_versions()\n .map(Into::>>::into)\n .collect();\n let Some(desired_state) = desired_state else {\n return Self {\n drop_existing: existing_may_exists,\n table_upsertion: None,\n };\n };\n let desired_cols: Cow<'_, TableColumnsSchema> = desired_state.into();\n let drop_existing = existing_invalidated\n || possible_existing_cols\n .iter()\n .any(|v| v.key_columns != desired_cols.key_columns)\n || (existing_may_exists && !existing.always_exists());\n let table_upsertion = if existing.always_exists() && !drop_existing {\n TableUpsertionAction::Update {\n columns_to_delete: possible_existing_cols\n .iter()\n 
.flat_map(|v| v.value_columns.keys())\n .filter(|column_name| !desired_cols.value_columns.contains_key(*column_name))\n .cloned()\n .collect(),\n columns_to_upsert: desired_cols\n .value_columns\n .iter()\n .filter(|(column_name, schema)| {\n !possible_existing_cols\n .iter()\n .all(|v| v.value_columns.get(*column_name) == Some(schema))\n })\n .map(|(k, v)| (k.to_owned(), v.to_owned()))\n .collect(),\n }\n } else {\n TableUpsertionAction::Create {\n keys: desired_cols.key_columns.to_owned(),\n values: desired_cols.value_columns.to_owned(),\n }\n };\n Self {\n drop_existing,\n table_upsertion: Some(table_upsertion).filter(|action| !action.is_empty()),\n }\n }", "predict_code_clean": "pub fn from_states(\n desired_state: Option<&S>,\n existing: &CombinedState,\n existing_invalidated: bool,\n) -> Self\nwhere\n for<'a> &'a S: Into>>,\n T: Clone,\n{\n let (drop_existing, table_upsertion) = match desired_state {\n None => (existing.initial().is_some(), None),\n Some(desired) => {\n let desired_schema: Cow<'_, TableColumnsSchema> = desired.into();\n let desired = desired_schema.into_owned();\n let existing_schema = existing.initial().map(|s| s.into().into_owned());\n if existing_invalidated {\n (\n existing_schema.is_some(),\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n )\n } else {\n match existing_schema {\n None => (\n false,\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n ),\n Some(existing_schema) => {\n if existing_schema.key_columns != desired.key_columns {\n (\n true,\n Some(TableUpsertionAction::Create {\n keys: desired.key_columns,\n values: desired.value_columns,\n }),\n )\n } else {\n let existing_values = &existing_schema.value_columns;\n let desired_values = &desired.value_columns;\n let columns_to_delete = existing_values\n .keys()\n .filter(|k| !desired_values.contains_key(*k))\n .cloned()\n .collect();\n let columns_to_upsert = 
desired_values\n .iter()\n .filter(|(k, v)| existing_values.get(*k) != Some(v))\n .map(|(k, v)| (k.clone(), v.clone()))\n .collect();\n if columns_to_delete.is_empty() && columns_to_upsert.is_empty() {\n (false, None)\n } else {\n (\n false,\n Some(TableUpsertionAction::Update {\n columns_to_delete,\n columns_to_upsert,\n }),\n )\n }\n }\n }\n }\n }\n }\n };\n Self {\n drop_existing,\n table_upsertion,\n }\n}"}}