repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/faiss.py | mem0/configs/vector_stores/faiss.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class FAISSConfig(BaseModel):
collection_name: str = Field("mem0", description="Default name for the collection")
path: Optional[str] = Field(None, description="Path to store FAISS index and metadata")
distance_strategy: str = Field(
"euclidean", description="Distance strategy to use. Options: 'euclidean', 'inner_product', 'cosine'"
)
normalize_L2: bool = Field(
False, description="Whether to normalize L2 vectors (only applicable for euclidean distance)"
)
embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
@model_validator(mode="before")
@classmethod
def validate_distance_strategy(cls, values: Dict[str, Any]) -> Dict[str, Any]:
distance_strategy = values.get("distance_strategy")
if distance_strategy and distance_strategy not in ["euclidean", "inner_product", "cosine"]:
raise ValueError("Invalid distance_strategy. Must be one of: 'euclidean', 'inner_product', 'cosine'")
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/cassandra.py | mem0/configs/vector_stores/cassandra.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, model_validator
class CassandraConfig(BaseModel):
"""Configuration for Apache Cassandra vector database."""
contact_points: List[str] = Field(
...,
description="List of contact point addresses (e.g., ['127.0.0.1', '127.0.0.2'])"
)
port: int = Field(9042, description="Cassandra port")
username: Optional[str] = Field(None, description="Database username")
password: Optional[str] = Field(None, description="Database password")
keyspace: str = Field("mem0", description="Keyspace name")
collection_name: str = Field("memories", description="Table name")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
secure_connect_bundle: Optional[str] = Field(
None,
description="Path to secure connect bundle for DataStax Astra DB"
)
protocol_version: int = Field(4, description="CQL protocol version")
load_balancing_policy: Optional[Any] = Field(
None,
description="Custom load balancing policy object"
)
@model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate authentication parameters."""
username = values.get("username")
password = values.get("password")
# Both username and password must be provided together or not at all
if (username and not password) or (password and not username):
raise ValueError(
"Both 'username' and 'password' must be provided together for authentication"
)
return values
@model_validator(mode="before")
@classmethod
def check_connection_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate connection configuration."""
secure_connect_bundle = values.get("secure_connect_bundle")
contact_points = values.get("contact_points")
# Either secure_connect_bundle or contact_points must be provided
if not secure_connect_bundle and not contact_points:
raise ValueError(
"Either 'contact_points' or 'secure_connect_bundle' must be provided"
)
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that no extra fields are provided."""
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
class Config:
arbitrary_types_allowed = True
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/milvus.py | mem0/configs/vector_stores/milvus.py | from enum import Enum
from typing import Any, Dict
from pydantic import BaseModel, ConfigDict, Field, model_validator
class MetricType(str, Enum):
"""
Metric Constant for milvus/ zilliz server.
"""
def __str__(self) -> str:
return str(self.value)
L2 = "L2"
IP = "IP"
COSINE = "COSINE"
HAMMING = "HAMMING"
JACCARD = "JACCARD"
class MilvusDBConfig(BaseModel):
url: str = Field("http://localhost:19530", description="Full URL for Milvus/Zilliz server")
token: str = Field(None, description="Token for Zilliz server / local setup defaults to None.")
collection_name: str = Field("mem0", description="Name of the collection")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
metric_type: str = Field("L2", description="Metric type for similarity search")
db_name: str = Field("", description="Name of the database")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/redis.py | mem0/configs/vector_stores/redis.py | from typing import Any, Dict
from pydantic import BaseModel, ConfigDict, Field, model_validator
# TODO: Upgrade to latest pydantic version
class RedisDBConfig(BaseModel):
redis_url: str = Field(..., description="Redis URL")
collection_name: str = Field("mem0", description="Collection name")
embedding_model_dims: int = Field(1536, description="Embedding model dimensions")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/mongodb.py | mem0/configs/vector_stores/mongodb.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class MongoDBConfig(BaseModel):
"""Configuration for MongoDB vector database."""
db_name: str = Field("mem0_db", description="Name of the MongoDB database")
collection_name: str = Field("mem0", description="Name of the MongoDB collection")
embedding_model_dims: Optional[int] = Field(1536, description="Dimensions of the embedding vectors")
mongo_uri: str = Field("mongodb://localhost:27017", description="MongoDB URI. Default is mongodb://localhost:27017")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please provide only the following fields: {', '.join(allowed_fields)}."
)
return values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/supabase.py | mem0/configs/vector_stores/supabase.py | from enum import Enum
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class IndexMethod(str, Enum):
AUTO = "auto"
HNSW = "hnsw"
IVFFLAT = "ivfflat"
class IndexMeasure(str, Enum):
COSINE = "cosine_distance"
L2 = "l2_distance"
L1 = "l1_distance"
MAX_INNER_PRODUCT = "max_inner_product"
class SupabaseConfig(BaseModel):
connection_string: str = Field(..., description="PostgreSQL connection string")
collection_name: str = Field("mem0", description="Name for the vector collection")
embedding_model_dims: Optional[int] = Field(1536, description="Dimensions of the embedding model")
index_method: Optional[IndexMethod] = Field(IndexMethod.AUTO, description="Index method to use")
index_measure: Optional[IndexMeasure] = Field(IndexMeasure.COSINE, description="Distance measure to use")
@model_validator(mode="before")
def check_connection_string(cls, values):
conn_str = values.get("connection_string")
if not conn_str or not conn_str.startswith("postgresql://"):
raise ValueError("A valid PostgreSQL connection string must be provided")
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/pgvector.py | mem0/configs/vector_stores/pgvector.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class PGVectorConfig(BaseModel):
dbname: str = Field("postgres", description="Default name for the database")
collection_name: str = Field("mem0", description="Default name for the collection")
embedding_model_dims: Optional[int] = Field(1536, description="Dimensions of the embedding model")
user: Optional[str] = Field(None, description="Database user")
password: Optional[str] = Field(None, description="Database password")
host: Optional[str] = Field(None, description="Database host. Default is localhost")
port: Optional[int] = Field(None, description="Database port. Default is 1536")
diskann: Optional[bool] = Field(False, description="Use diskann for approximate nearest neighbors search")
hnsw: Optional[bool] = Field(True, description="Use hnsw for faster search")
minconn: Optional[int] = Field(1, description="Minimum number of connections in the pool")
maxconn: Optional[int] = Field(5, description="Maximum number of connections in the pool")
# New SSL and connection options
sslmode: Optional[str] = Field(None, description="SSL mode for PostgreSQL connection (e.g., 'require', 'prefer', 'disable')")
connection_string: Optional[str] = Field(None, description="PostgreSQL connection string (overrides individual connection parameters)")
connection_pool: Optional[Any] = Field(None, description="psycopg connection pool object (overrides connection string and individual parameters)")
@model_validator(mode="before")
def check_auth_and_connection(cls, values):
# If connection_pool is provided, skip validation of individual connection parameters
if values.get("connection_pool") is not None:
return values
# If connection_string is provided, skip validation of individual connection parameters
if values.get("connection_string") is not None:
return values
# Otherwise, validate individual connection parameters
user, password = values.get("user"), values.get("password")
host, port = values.get("host"), values.get("port")
if not user and not password:
raise ValueError("Both 'user' and 'password' must be provided when not using connection_string.")
if not host and not port:
raise ValueError("Both 'host' and 'port' must be provided when not using connection_string.")
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/opensearch.py | mem0/configs/vector_stores/opensearch.py | from typing import Any, Dict, Optional, Type, Union
from pydantic import BaseModel, Field, model_validator
class OpenSearchConfig(BaseModel):
collection_name: str = Field("mem0", description="Name of the index")
host: str = Field("localhost", description="OpenSearch host")
port: int = Field(9200, description="OpenSearch port")
user: Optional[str] = Field(None, description="Username for authentication")
password: Optional[str] = Field(None, description="Password for authentication")
api_key: Optional[str] = Field(None, description="API key for authentication (if applicable)")
embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
verify_certs: bool = Field(False, description="Verify SSL certificates (default False for OpenSearch)")
use_ssl: bool = Field(False, description="Use SSL for connection (default False for OpenSearch)")
http_auth: Optional[object] = Field(None, description="HTTP authentication method / AWS SigV4")
connection_class: Optional[Union[str, Type]] = Field(
"RequestsHttpConnection", description="Connection class for OpenSearch"
)
pool_maxsize: int = Field(20, description="Maximum number of connections in the pool")
@model_validator(mode="before")
@classmethod
def validate_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
# Check if host is provided
if not values.get("host"):
raise ValueError("Host must be provided for OpenSearch")
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Allowed fields: {', '.join(allowed_fields)}"
)
return values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/azure_ai_search.py | mem0/configs/vector_stores/azure_ai_search.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class AzureAISearchConfig(BaseModel):
collection_name: str = Field("mem0", description="Name of the collection")
service_name: str = Field(None, description="Azure AI Search service name")
api_key: str = Field(None, description="API key for the Azure AI Search service")
embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
compression_type: Optional[str] = Field(
None, description="Type of vector compression to use. Options: 'scalar', 'binary', or None"
)
use_float16: bool = Field(
False,
description="Whether to store vectors in half precision (Edm.Half) instead of full precision (Edm.Single)",
)
hybrid_search: bool = Field(
False, description="Whether to use hybrid search. If True, vector_filter_mode must be 'preFilter'"
)
vector_filter_mode: Optional[str] = Field(
"preFilter", description="Mode for vector filtering. Options: 'preFilter', 'postFilter'"
)
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
# Check for use_compression to provide a helpful error
if "use_compression" in extra_fields:
raise ValueError(
"The parameter 'use_compression' is no longer supported. "
"Please use 'compression_type=\"scalar\"' instead of 'use_compression=True' "
"or 'compression_type=None' instead of 'use_compression=False'."
)
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please input only the following fields: {', '.join(allowed_fields)}"
)
# Validate compression_type values
if "compression_type" in values and values["compression_type"] is not None:
valid_types = ["scalar", "binary"]
if values["compression_type"].lower() not in valid_types:
raise ValueError(
f"Invalid compression_type: {values['compression_type']}. "
f"Must be one of: {', '.join(valid_types)}, or None"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/chroma.py | mem0/configs/vector_stores/chroma.py | from typing import Any, ClassVar, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class ChromaDbConfig(BaseModel):
try:
from chromadb.api.client import Client
except ImportError:
raise ImportError("The 'chromadb' library is required. Please install it using 'pip install chromadb'.")
Client: ClassVar[type] = Client
collection_name: str = Field("mem0", description="Default name for the collection/database")
client: Optional[Client] = Field(None, description="Existing ChromaDB client instance")
path: Optional[str] = Field(None, description="Path to the database directory")
host: Optional[str] = Field(None, description="Database connection remote host")
port: Optional[int] = Field(None, description="Database connection remote port")
# ChromaDB Cloud configuration
api_key: Optional[str] = Field(None, description="ChromaDB Cloud API key")
tenant: Optional[str] = Field(None, description="ChromaDB Cloud tenant ID")
@model_validator(mode="before")
def check_connection_config(cls, values):
host, port, path = values.get("host"), values.get("port"), values.get("path")
api_key, tenant = values.get("api_key"), values.get("tenant")
# Check if cloud configuration is provided
cloud_config = bool(api_key and tenant)
# If cloud configuration is provided, remove any default path that might have been added
if cloud_config and path == "/tmp/chroma":
values.pop("path", None)
return values
# Check if local/server configuration is provided (excluding default tmp path for cloud config)
local_config = bool(path and path != "/tmp/chroma") or bool(host and port)
if not cloud_config and not local_config:
raise ValueError("Either ChromaDB Cloud configuration (api_key, tenant) or local configuration (path or host/port) must be provided.")
if cloud_config and local_config:
raise ValueError("Cannot specify both cloud configuration and local configuration. Choose one.")
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/s3_vectors.py | mem0/configs/vector_stores/s3_vectors.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class S3VectorsConfig(BaseModel):
vector_bucket_name: str = Field(description="Name of the S3 Vector bucket")
collection_name: str = Field("mem0", description="Name of the vector index")
embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
distance_metric: str = Field(
"cosine",
description="Distance metric for similarity search. Options: 'cosine', 'euclidean'",
)
region_name: Optional[str] = Field(None, description="AWS region for the S3 Vectors client")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/azure_mysql.py | mem0/configs/vector_stores/azure_mysql.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, model_validator
class AzureMySQLConfig(BaseModel):
"""Configuration for Azure MySQL vector database."""
host: str = Field(..., description="MySQL server host (e.g., myserver.mysql.database.azure.com)")
port: int = Field(3306, description="MySQL server port")
user: str = Field(..., description="Database user")
password: Optional[str] = Field(None, description="Database password (not required if using Azure credential)")
database: str = Field(..., description="Database name")
collection_name: str = Field("mem0", description="Collection/table name")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
use_azure_credential: bool = Field(
False,
description="Use Azure DefaultAzureCredential for authentication instead of password"
)
ssl_ca: Optional[str] = Field(None, description="Path to SSL CA certificate")
ssl_disabled: bool = Field(False, description="Disable SSL connection (not recommended for production)")
minconn: int = Field(1, description="Minimum number of connections in the pool")
maxconn: int = Field(5, description="Maximum number of connections in the pool")
connection_pool: Optional[Any] = Field(
None,
description="Pre-configured connection pool object (overrides other connection parameters)"
)
@model_validator(mode="before")
@classmethod
def check_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate authentication parameters."""
# If connection_pool is provided, skip validation
if values.get("connection_pool") is not None:
return values
use_azure_credential = values.get("use_azure_credential", False)
password = values.get("password")
# Either password or Azure credential must be provided
if not use_azure_credential and not password:
raise ValueError(
"Either 'password' must be provided or 'use_azure_credential' must be set to True"
)
return values
@model_validator(mode="before")
@classmethod
def check_required_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate required fields."""
# If connection_pool is provided, skip validation of individual parameters
if values.get("connection_pool") is not None:
return values
required_fields = ["host", "user", "database"]
missing_fields = [field for field in required_fields if not values.get(field)]
if missing_fields:
raise ValueError(
f"Missing required fields: {', '.join(missing_fields)}. "
f"These fields are required when not using a pre-configured connection_pool."
)
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that no extra fields are provided."""
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
class Config:
arbitrary_types_allowed = True
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/__init__.py | mem0/configs/vector_stores/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/pinecone.py | mem0/configs/vector_stores/pinecone.py | import os
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class PineconeConfig(BaseModel):
"""Configuration for Pinecone vector database."""
collection_name: str = Field("mem0", description="Name of the index/collection")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
client: Optional[Any] = Field(None, description="Existing Pinecone client instance")
api_key: Optional[str] = Field(None, description="API key for Pinecone")
environment: Optional[str] = Field(None, description="Pinecone environment")
serverless_config: Optional[Dict[str, Any]] = Field(None, description="Configuration for serverless deployment")
pod_config: Optional[Dict[str, Any]] = Field(None, description="Configuration for pod-based deployment")
hybrid_search: bool = Field(False, description="Whether to enable hybrid search")
metric: str = Field("cosine", description="Distance metric for vector similarity")
batch_size: int = Field(100, description="Batch size for operations")
extra_params: Optional[Dict[str, Any]] = Field(None, description="Additional parameters for Pinecone client")
namespace: Optional[str] = Field(None, description="Namespace for the collection")
@model_validator(mode="before")
@classmethod
def check_api_key_or_client(cls, values: Dict[str, Any]) -> Dict[str, Any]:
api_key, client = values.get("api_key"), values.get("client")
if not api_key and not client and "PINECONE_API_KEY" not in os.environ:
raise ValueError(
"Either 'api_key' or 'client' must be provided, or PINECONE_API_KEY environment variable must be set."
)
return values
@model_validator(mode="before")
@classmethod
def check_pod_or_serverless(cls, values: Dict[str, Any]) -> Dict[str, Any]:
pod_config, serverless_config = values.get("pod_config"), values.get("serverless_config")
if pod_config and serverless_config:
raise ValueError(
"Both 'pod_config' and 'serverless_config' cannot be specified. Choose one deployment option."
)
return values
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/baidu.py | mem0/configs/vector_stores/baidu.py | from typing import Any, Dict
from pydantic import BaseModel, ConfigDict, Field, model_validator
class BaiduDBConfig(BaseModel):
endpoint: str = Field("http://localhost:8287", description="Endpoint URL for Baidu VectorDB")
account: str = Field("root", description="Account for Baidu VectorDB")
api_key: str = Field(None, description="API Key for Baidu VectorDB")
database_name: str = Field("mem0", description="Name of the database")
table_name: str = Field("mem0", description="Name of the table")
embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
metric_type: str = Field("L2", description="Metric type for similarity search")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/langchain.py | mem0/configs/vector_stores/langchain.py | from typing import Any, ClassVar, Dict
from pydantic import BaseModel, ConfigDict, Field, model_validator
class LangchainConfig(BaseModel):
try:
from langchain_community.vectorstores import VectorStore
except ImportError:
raise ImportError(
"The 'langchain_community' library is required. Please install it using 'pip install langchain_community'."
)
VectorStore: ClassVar[type] = VectorStore
client: VectorStore = Field(description="Existing VectorStore instance")
collection_name: str = Field("mem0", description="Name of the collection to use")
@model_validator(mode="before")
@classmethod
def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
allowed_fields = set(cls.model_fields.keys())
input_fields = set(values.keys())
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. Please input only the following fields: {', '.join(allowed_fields)}"
)
return values
model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/weaviate.py | mem0/configs/vector_stores/weaviate.py | from typing import Any, ClassVar, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class WeaviateConfig(BaseModel):
    """Configuration for connecting to a Weaviate vector store."""

    from weaviate import WeaviateClient

    # Expose the imported client type on the class for downstream access.
    WeaviateClient: ClassVar[type] = WeaviateClient

    collection_name: str = Field("mem0", description="Name of the collection")
    embedding_model_dims: int = Field(1536, description="Dimensions of the embedding model")
    cluster_url: Optional[str] = Field(None, description="URL for Weaviate server")
    auth_client_secret: Optional[str] = Field(None, description="API key for Weaviate authentication")
    additional_headers: Optional[Dict[str, str]] = Field(None, description="Additional headers for requests")

    @model_validator(mode="before")
    @classmethod
    def check_connection_params(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Require a cluster URL to be supplied."""
        if not values.get("cluster_url"):
            raise ValueError("'cluster_url' must be provided.")
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any keys that are not declared model fields."""
        known = set(cls.model_fields.keys())
        unknown = set(values.keys()) - known
        if unknown:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(unknown)}. Please input only the following fields: {', '.join(known)}"
            )
        return values

    model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/elasticsearch.py | mem0/configs/vector_stores/elasticsearch.py | from collections.abc import Callable
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, model_validator
class ElasticsearchConfig(BaseModel):
    """Configuration for an Elasticsearch-backed vector store."""

    collection_name: str = Field("mem0", description="Name of the index")
    host: str = Field("localhost", description="Elasticsearch host")
    port: int = Field(9200, description="Elasticsearch port")
    user: Optional[str] = Field(None, description="Username for authentication")
    password: Optional[str] = Field(None, description="Password for authentication")
    cloud_id: Optional[str] = Field(None, description="Cloud ID for Elastic Cloud")
    api_key: Optional[str] = Field(None, description="API key for authentication")
    embedding_model_dims: int = Field(1536, description="Dimension of the embedding vector")
    verify_certs: bool = Field(True, description="Verify SSL certificates")
    use_ssl: bool = Field(True, description="Use SSL for connection")
    auto_create_index: bool = Field(True, description="Automatically create index during initialization")
    custom_search_query: Optional[Callable[[List[float], int, Optional[Dict]], Dict]] = Field(
        None, description="Custom search query function. Parameters: (query, limit, filters) -> Dict"
    )
    headers: Optional[Dict[str, str]] = Field(None, description="Custom headers to include in requests")

    @model_validator(mode="before")
    @classmethod
    def validate_auth(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Require a reachable endpoint and some form of credentials."""
        # A cloud_id or an explicit host must be given.
        if not (values.get("cloud_id") or values.get("host")):
            raise ValueError("Either cloud_id or host must be provided")
        # Either an API key or a user/password pair must be given.
        has_basic_auth = values.get("user") and values.get("password")
        if not (values.get("api_key") or has_basic_auth):
            raise ValueError("Either api_key or user/password must be provided")
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_headers(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate headers format and content"""
        headers = values.get("headers")
        if headers is None:
            return values
        if not isinstance(headers, dict):
            raise ValueError("headers must be a dictionary")
        # Every key and value must be a plain string.
        if any(not isinstance(k, str) or not isinstance(v, str) for k, v in headers.items()):
            raise ValueError("All header keys and values must be strings")
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any keys that are not declared model fields."""
        known = set(cls.model_fields.keys())
        unknown = set(values.keys()) - known
        if unknown:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(unknown)}. "
                f"Please input only the following fields: {', '.join(known)}"
            )
        return values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/vector_stores/qdrant.py | mem0/configs/vector_stores/qdrant.py | from typing import Any, ClassVar, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
class QdrantConfig(BaseModel):
    """Configuration for connecting to a Qdrant vector store."""

    from qdrant_client import QdrantClient

    # Expose the imported client type on the class for downstream access.
    QdrantClient: ClassVar[type] = QdrantClient

    collection_name: str = Field("mem0", description="Name of the collection")
    embedding_model_dims: Optional[int] = Field(1536, description="Dimensions of the embedding model")
    client: Optional[QdrantClient] = Field(None, description="Existing Qdrant client instance")
    host: Optional[str] = Field(None, description="Host address for Qdrant server")
    port: Optional[int] = Field(None, description="Port for Qdrant server")
    path: Optional[str] = Field("/tmp/qdrant", description="Path for local Qdrant database")
    url: Optional[str] = Field(None, description="Full URL for Qdrant server")
    api_key: Optional[str] = Field(None, description="API key for Qdrant server")
    on_disk: Optional[bool] = Field(False, description="Enables persistent storage")

    @model_validator(mode="before")
    @classmethod
    def check_host_port_or_path(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Ensure at least one complete connection method was supplied."""
        has_host_port = values.get("host") and values.get("port")
        has_remote = values.get("url") and values.get("api_key")
        if not (values.get("path") or has_host_port or has_remote):
            raise ValueError("Either 'host' and 'port' or 'url' and 'api_key' or 'path' must be provided.")
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Reject any keys that are not declared model fields."""
        known = set(cls.model_fields.keys())
        unknown = set(values.keys()) - known
        if unknown:
            raise ValueError(
                f"Extra fields not allowed: {', '.join(unknown)}. Please input only the following fields: {', '.join(known)}"
            )
        return values

    model_config = ConfigDict(arbitrary_types_allowed=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/embeddings/__init__.py | mem0/configs/embeddings/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/embeddings/base.py | mem0/configs/embeddings/base.py | import os
from abc import ABC
from typing import Dict, Optional, Union
import httpx
from mem0.configs.base import AzureConfig
class BaseEmbedderConfig(ABC):
    """
    Config for Embeddings.
    """

    def __init__(
        self,
        model: Optional[str] = None,
        api_key: Optional[str] = None,
        embedding_dims: Optional[int] = None,
        # Ollama specific
        ollama_base_url: Optional[str] = None,
        # Openai specific
        openai_base_url: Optional[str] = None,
        # Huggingface specific
        model_kwargs: Optional[dict] = None,
        huggingface_base_url: Optional[str] = None,
        # AzureOpenAI specific.
        # Fix: the default used to be a shared mutable dict ({}) annotated as
        # Optional[AzureConfig]; use None and build the config per instance.
        azure_kwargs: Optional[dict] = None,
        http_client_proxies: Optional[Union[Dict, str]] = None,
        # VertexAI specific
        vertex_credentials_json: Optional[str] = None,
        memory_add_embedding_type: Optional[str] = None,
        memory_update_embedding_type: Optional[str] = None,
        memory_search_embedding_type: Optional[str] = None,
        # Gemini specific
        output_dimensionality: Optional[str] = None,
        # LM Studio specific
        lmstudio_base_url: Optional[str] = "http://localhost:1234/v1",
        # AWS Bedrock specific
        aws_access_key_id: Optional[str] = None,
        aws_secret_access_key: Optional[str] = None,
        aws_region: Optional[str] = None,
    ):
        """
        Initializes a configuration class instance for the Embeddings.

        :param model: Embedding model to use, defaults to None
        :param api_key: API key to be used, defaults to None
        :param embedding_dims: The number of dimensions in the embedding, defaults to None
        :param ollama_base_url: Base URL for the Ollama API, defaults to None
        :param openai_base_url: OpenAI base URL to be used, defaults to None
        :param model_kwargs: key-value arguments for the huggingface embedding model, defaults to {}
        :param huggingface_base_url: Huggingface base URL to be used, defaults to None
        :param azure_kwargs: key-value arguments for the AzureOpenAI embedding model, defaults to {}
        :param http_client_proxies: Proxy settings used to create self.http_client, defaults to None
        :param vertex_credentials_json: Path to the Vertex AI credentials JSON file, defaults to None
        :param memory_add_embedding_type: Embedding type for the add memory action, defaults to None
        :param memory_update_embedding_type: Embedding type for the update memory action, defaults to None
        :param memory_search_embedding_type: Embedding type for the search memory action, defaults to None
        :param output_dimensionality: Gemini output dimensionality, defaults to None
        :param lmstudio_base_url: LM Studio base URL, defaults to "http://localhost:1234/v1"
        :param aws_access_key_id: AWS access key id for Bedrock, defaults to None
        :param aws_secret_access_key: AWS secret access key for Bedrock, defaults to None
        :param aws_region: AWS region for Bedrock; falls back to $AWS_REGION, then "us-west-2"
        """
        self.model = model
        self.api_key = api_key
        self.openai_base_url = openai_base_url
        self.embedding_dims = embedding_dims

        # Proxy-aware HTTP client (used by AzureOpenAI).
        # NOTE(review): httpx>=0.28 removed the `proxies` argument — confirm the
        # pinned httpx version still supports it.
        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None

        # Ollama specific
        self.ollama_base_url = ollama_base_url

        # Huggingface specific
        self.model_kwargs = model_kwargs or {}
        self.huggingface_base_url = huggingface_base_url

        # AzureOpenAI specific. A fresh AzureConfig is built per instance; the
        # old `... or {}` tail was dead code (an AzureConfig instance is truthy).
        self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))

        # VertexAI specific
        self.vertex_credentials_json = vertex_credentials_json
        self.memory_add_embedding_type = memory_add_embedding_type
        self.memory_update_embedding_type = memory_update_embedding_type
        self.memory_search_embedding_type = memory_search_embedding_type

        # Gemini specific
        self.output_dimensionality = output_dimensionality

        # LM Studio specific
        self.lmstudio_base_url = lmstudio_base_url

        # AWS Bedrock specific; region resolution order: arg, env, hard default.
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_region = aws_region or os.environ.get("AWS_REGION") or "us-west-2"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/llm.py | mem0/configs/rerankers/llm.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class LLMRerankerConfig(BaseRerankerConfig):
    """
    Configuration for the LLM-based reranker.

    Scores candidate documents with a chat model rather than a dedicated
    cross-encoder; temperature defaults to 0.0 for deterministic scoring.
    """

    # Chat model that performs the relevance scoring.
    model: str = Field(default="gpt-4o-mini", description="LLM model to use for reranking")
    # Credentials for the chosen provider.
    api_key: Optional[str] = Field(default=None, description="API key for the LLM provider")
    provider: str = Field(default="openai", description="LLM provider (openai, anthropic, etc.)")
    # Result-set control.
    top_k: Optional[int] = Field(default=None, description="Number of top documents to return after reranking")
    # Generation parameters for the scoring call.
    temperature: float = Field(default=0.0, description="Temperature for LLM generation")
    max_tokens: int = Field(default=100, description="Maximum tokens for LLM response")
    # Optional override of the built-in scoring prompt.
    scoring_prompt: Optional[str] = Field(default=None, description="Custom prompt template for scoring documents")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/huggingface.py | mem0/configs/rerankers/huggingface.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class HuggingFaceRerankerConfig(BaseRerankerConfig):
    """
    HuggingFace reranker settings.

    Extends BaseRerankerConfig with cross-encoder model, device, and batching
    options specific to HuggingFace-hosted rerank models.
    """

    model: Optional[str] = Field(default="BAAI/bge-reranker-base", description="The HuggingFace model to use for reranking")
    device: Optional[str] = Field(default=None, description="Device to run the model on ('cpu', 'cuda', etc.)")
    batch_size: int = Field(default=32, description="Batch size for processing documents")
    max_length: int = Field(default=512, description="Maximum length for tokenization")
    normalize: bool = Field(default=True, description="Whether to normalize scores")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/config.py | mem0/configs/rerankers/config.py | from typing import Optional
from pydantic import BaseModel, Field
class RerankerConfig(BaseModel):
    """Top-level reranker selection: a provider name plus its raw config dict."""

    provider: str = Field(description="Reranker provider (e.g., 'cohere', 'sentence_transformer')", default="cohere")
    config: Optional[dict] = Field(description="Provider-specific reranker configuration", default=None)

    # Unknown keys are rejected rather than silently ignored.
    model_config = {"extra": "forbid"}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/cohere.py | mem0/configs/rerankers/cohere.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class CohereRerankerConfig(BaseRerankerConfig):
    """
    Cohere reranker settings.

    Extends BaseRerankerConfig with options specific to the Cohere rerank API.
    """

    model: Optional[str] = Field(default="rerank-english-v3.0", description="The Cohere rerank model to use")
    return_documents: bool = Field(default=False, description="Whether to return the document texts in the response")
    max_chunks_per_doc: Optional[int] = Field(default=None, description="Maximum number of chunks per document")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/__init__.py | mem0/configs/rerankers/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/sentence_transformer.py | mem0/configs/rerankers/sentence_transformer.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class SentenceTransformerRerankerConfig(BaseRerankerConfig):
    """
    Sentence-Transformers cross-encoder reranker settings.

    Extends BaseRerankerConfig with model, device, and batching options for
    locally-run cross-encoder models.
    """

    model: Optional[str] = Field(default="cross-encoder/ms-marco-MiniLM-L-6-v2", description="The cross-encoder model name to use")
    device: Optional[str] = Field(default=None, description="Device to run the model on ('cpu', 'cuda', etc.)")
    batch_size: int = Field(default=32, description="Batch size for processing documents")
    show_progress_bar: bool = Field(default=False, description="Whether to show progress bar during processing")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/base.py | mem0/configs/rerankers/base.py | from typing import Optional
from pydantic import BaseModel, Field
class BaseRerankerConfig(BaseModel):
    """
    Common reranker parameters shared by every provider.

    Provider-specific options live in the per-provider subclasses; this base
    carries only the fields that all rerankers understand.
    """

    provider: Optional[str] = Field(default=None, description="The reranker provider to use")
    model: Optional[str] = Field(default=None, description="The reranker model to use")
    api_key: Optional[str] = Field(default=None, description="The API key for the reranker service")
    top_k: Optional[int] = Field(default=None, description="Maximum number of documents to return after reranking")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/configs/rerankers/zero_entropy.py | mem0/configs/rerankers/zero_entropy.py | from typing import Optional
from pydantic import Field
from mem0.configs.rerankers.base import BaseRerankerConfig
class ZeroEntropyRerankerConfig(BaseRerankerConfig):
    """
    Zero Entropy reranker settings.

    The API key may also come from the ZERO_ENTROPY_API_KEY environment
    variable when not supplied here (resolved by the reranker, not this model).
    """

    model: str = Field(default="zerank-1", description="Model to use for reranking. Available models: zerank-1, zerank-1-small")
    api_key: Optional[str] = Field(default=None, description="Zero Entropy API key")
    top_k: Optional[int] = Field(default=None, description="Number of top documents to return after reranking")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/client/main.py | mem0/client/main.py | import hashlib
import logging
import os
import warnings
from typing import Any, Dict, List, Optional
import httpx
import requests
from mem0.client.project import AsyncProject, Project
from mem0.client.utils import api_error_handler
# Exception classes are referenced in docstrings only
from mem0.memory.setup import get_user_id, setup_config
from mem0.memory.telemetry import capture_client_event
# Module-level logger; handler and level configuration is left to the host app.
logger = logging.getLogger(__name__)
# Re-enable DeprecationWarning display so callers see client deprecations.
warnings.filterwarnings("default", category=DeprecationWarning)
# Setup user config
# Ensures the local mem0 user configuration exists before any client is built.
setup_config()
class MemoryClient:
"""Client for interacting with the Mem0 API.
This class provides methods to create, retrieve, search, and delete
memories using the Mem0 API.
Attributes:
api_key (str): The API key for authenticating with the Mem0 API.
host (str): The base URL for the Mem0 API.
client (httpx.Client): The HTTP client used for making API requests.
org_id (str, optional): Organization ID.
project_id (str, optional): Project ID.
user_id (str): Unique identifier for the user.
"""
def __init__(
    self,
    api_key: Optional[str] = None,
    host: Optional[str] = None,
    org_id: Optional[str] = None,
    project_id: Optional[str] = None,
    client: Optional[httpx.Client] = None,
):
    """Initialize the MemoryClient.

    Args:
        api_key: Mem0 API key; falls back to the MEM0_API_KEY env var.
        host: Base URL for the Mem0 API (default "https://api.mem0.ai").
        org_id: Optional organization ID.
        project_id: Optional project ID.
        client: Optional pre-built httpx.Client; its base_url and auth
            headers are overridden to match this client's settings.

    Raises:
        ValueError: If no API key is available.
    """
    self.api_key = api_key or os.getenv("MEM0_API_KEY")
    self.host = host or "https://api.mem0.ai"
    self.org_id = org_id
    self.project_id = project_id
    self.user_id = get_user_id()
    if not self.api_key:
        raise ValueError("Mem0 API Key not provided. Please provide an API Key.")

    # NOTE(review): the anonymous id from get_user_id() above is immediately
    # replaced by an MD5 hash of the API key — confirm the first assignment
    # is still needed for its side effects.
    self.user_id = hashlib.md5(self.api_key.encode()).hexdigest()

    auth_headers = {
        "Authorization": f"Token {self.api_key}",
        "Mem0-User-ID": self.user_id,
    }
    if client is not None:
        # Reuse the caller's client but force our base_url and auth headers.
        self.client = client
        self.client.base_url = httpx.URL(self.host)
        self.client.headers.update(auth_headers)
    else:
        self.client = httpx.Client(
            base_url=self.host,
            headers=auth_headers,
            timeout=300,
        )

    # Ping the API to validate credentials and learn the account email.
    self.user_email = self._validate_api_key()

    # Project manager scoped to this client's org/project.
    self.project = Project(
        client=self.client,
        org_id=self.org_id,
        project_id=self.project_id,
        user_email=self.user_email,
    )
    capture_client_event("client.init", self, {"sync_type": "sync"})
def _validate_api_key(self):
    """Validate the API key against /v1/ping/ and return the account email.

    Also captures org_id/project_id from the ping response when present.

    Raises:
        ValueError: If the API rejects the key (message taken from the
            response's "detail" field when available).
    """
    try:
        params = self._prepare_params()
        response = self.client.get("/v1/ping/", params=params)
        # Fix: check the HTTP status BEFORE parsing the body. The original
        # called response.json() first, so a non-JSON error body raised a
        # JSONDecodeError instead of the intended ValueError below.
        response.raise_for_status()
        data = response.json()
        if data.get("org_id") and data.get("project_id"):
            self.org_id = data.get("org_id")
            self.project_id = data.get("project_id")
        return data.get("user_email")
    except httpx.HTTPStatusError as e:
        try:
            error_data = e.response.json()
            error_message = error_data.get("detail", str(e))
        except Exception:
            # Error body was not JSON; fall back to the exception text.
            error_message = str(e)
        raise ValueError(f"Error: {error_message}")
@api_error_handler
def add(self, messages, **kwargs) -> Dict[str, Any]:
    """Create memories from one or more messages.

    Args:
        messages: A string, a single message dict, or a list of message
            dicts. A bare string becomes one user message.
        **kwargs: Extra parameters (user_id, agent_id, app_id, metadata,
            filters, async_mode, ...).

    Returns:
        The API response in v1.1 format.
    """
    # Normalize the input to a list of message dicts (mirrors OSS behavior).
    if isinstance(messages, str):
        messages = [{"role": "user", "content": messages}]
    elif isinstance(messages, dict):
        messages = [messages]
    elif not isinstance(messages, list):
        raise ValueError(
            f"messages must be str, dict, or list[dict], got {type(messages).__name__}"
        )

    kwargs = self._prepare_params(kwargs)
    # Async ingestion is the default unless the caller opts out explicitly.
    kwargs.setdefault("async_mode", True)
    # All add calls use the v1.1 output format.
    kwargs["output_format"] = "v1.1"

    payload = self._prepare_payload(messages, kwargs)
    response = self.client.post("/v1/memories/", json=payload)
    response.raise_for_status()

    # Metadata may contain user data; strip it before telemetry.
    kwargs.pop("metadata", None)
    capture_client_event("client.add", self, {"keys": list(kwargs.keys()), "sync_type": "sync"})
    return response.json()
@api_error_handler
def get(self, memory_id: str) -> Dict[str, Any]:
    """Fetch a single memory by its ID.

    Args:
        memory_id: Identifier of the memory to fetch.

    Returns:
        The memory data as a dictionary.
    """
    response = self.client.get(f"/v1/memories/{memory_id}/", params=self._prepare_params())
    response.raise_for_status()
    capture_client_event("client.get", self, {"memory_id": memory_id, "sync_type": "sync"})
    return response.json()
@api_error_handler
def get_all(self, **kwargs) -> Dict[str, Any]:
    """List memories with optional filtering via the v2 endpoint.

    Args:
        **kwargs: Filter parameters (user_id, agent_id, app_id, top_k,
            page, page_size).

    Returns:
        Memories wrapped in v1.1 format: {"results": [...]}.
    """
    body = self._prepare_params(kwargs)
    # async_mode is meaningless for listing; drop it if present.
    body.pop("async_mode", None)

    # Pagination travels as query params; everything else goes in the body.
    if "page" in body and "page_size" in body:
        pagination = {
            "page": body.pop("page"),
            "page_size": body.pop("page_size"),
        }
        response = self.client.post("/v2/memories/", json=body, params=pagination)
    else:
        response = self.client.post("/v2/memories/", json=body)
    response.raise_for_status()

    # Metadata may contain user data; strip it before telemetry.
    kwargs.pop("metadata", None)
    capture_client_event(
        "client.get_all",
        self,
        {
            "api_version": "v2",
            "keys": list(kwargs.keys()),
            "sync_type": "sync",
        },
    )

    payload = response.json()
    # Older responses may be a bare list; wrap into the v1.1 envelope.
    return {"results": payload} if isinstance(payload, list) else payload
@api_error_handler
def search(self, query: str, **kwargs) -> Dict[str, Any]:
    """Search memories matching a query via the v2 endpoint.

    Args:
        query: The search query string.
        **kwargs: Extra parameters (user_id, agent_id, app_id, top_k,
            filters).

    Returns:
        Search results wrapped in v1.1 format: {"results": [...]}.
    """
    request_body = {"query": query}
    extra = self._prepare_params(kwargs)
    # async_mode is meaningless for search; drop it if present.
    extra.pop("async_mode", None)
    request_body.update(extra)

    response = self.client.post("/v2/memories/search/", json=request_body)
    response.raise_for_status()

    # Metadata may contain user data; strip it before telemetry.
    kwargs.pop("metadata", None)
    capture_client_event(
        "client.search",
        self,
        {
            "api_version": "v2",
            "keys": list(kwargs.keys()),
            "sync_type": "sync",
        },
    )

    payload = response.json()
    # Older responses may be a bare list; wrap into the v1.1 envelope.
    return {"results": payload} if isinstance(payload, list) else payload
@api_error_handler
def update(
    self,
    memory_id: str,
    text: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Update a memory by ID.

    Args:
        memory_id (str): Memory ID.
        text (str, optional): New content to update the memory with.
        metadata (dict, optional): Metadata to update in the memory.

    Returns:
        Dict[str, Any]: The response from the server.

    Raises:
        ValueError: If neither text nor metadata is provided.

    Example:
        >>> client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
    """
    if text is None and metadata is None:
        raise ValueError("Either text or metadata must be provided for update.")

    payload: Dict[str, Any] = {}
    if text is not None:
        payload["text"] = text
    if metadata is not None:
        payload["metadata"] = metadata

    params = self._prepare_params()
    response = self.client.put(f"/v1/memories/{memory_id}/", json=payload, params=params)
    response.raise_for_status()
    # Fix: record telemetry only AFTER a successful update, consistent with
    # every sibling method (get, delete, history, ...) which captures events
    # post-success; the original captured before the request was even sent.
    capture_client_event("client.update", self, {"memory_id": memory_id, "sync_type": "sync"})
    return response.json()
@api_error_handler
def delete(self, memory_id: str) -> Dict[str, Any]:
    """Delete a single memory by its ID.

    Args:
        memory_id: Identifier of the memory to delete.

    Returns:
        The API response as a dictionary.
    """
    response = self.client.delete(f"/v1/memories/{memory_id}/", params=self._prepare_params())
    response.raise_for_status()
    capture_client_event("client.delete", self, {"memory_id": memory_id, "sync_type": "sync"})
    return response.json()
@api_error_handler
def delete_all(self, **kwargs) -> Dict[str, str]:
    """Delete all memories matching the optional filters.

    Args:
        **kwargs: Filter parameters (user_id, agent_id, app_id).

    Returns:
        The API response as a dictionary.
    """
    filters = self._prepare_params(kwargs)
    response = self.client.delete("/v1/memories/", params=filters)
    response.raise_for_status()
    capture_client_event(
        "client.delete_all",
        self,
        {"keys": list(kwargs.keys()), "sync_type": "sync"},
    )
    return response.json()
@api_error_handler
def history(self, memory_id: str) -> List[Dict[str, Any]]:
    """Fetch the change history of a specific memory.

    Args:
        memory_id: Identifier of the memory.

    Returns:
        A list of history entries as dictionaries.
    """
    response = self.client.get(f"/v1/memories/{memory_id}/history/", params=self._prepare_params())
    response.raise_for_status()
    capture_client_event("client.history", self, {"memory_id": memory_id, "sync_type": "sync"})
    return response.json()
@api_error_handler
def users(self) -> Dict[str, Any]:
    """Return every user, agent, and session that has stored memories."""
    response = self.client.get("/v1/entities/", params=self._prepare_params())
    response.raise_for_status()
    capture_client_event("client.users", self, {"sync_type": "sync"})
    return response.json()
@api_error_handler
def delete_users(
    self,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    app_id: Optional[str] = None,
    run_id: Optional[str] = None,
) -> Dict[str, str]:
    """Delete one entity, or every entity when no filter is given.

    Only the first provided ID (in user/agent/app/run order) is used.

    Args:
        user_id: Optional user ID to delete a specific user.
        agent_id: Optional agent ID to delete a specific agent.
        app_id: Optional app ID to delete a specific app.
        run_id: Optional run ID to delete a specific run.

    Returns:
        Dict with a success message.

    Raises:
        ValueError: If there is nothing to delete.
    """
    selectors = (
        ("user", user_id),
        ("agent", agent_id),
        ("app", app_id),
        ("run", run_id),
    )
    chosen = next(((etype, name) for etype, name in selectors if name), None)
    if chosen is not None:
        to_delete = [{"type": chosen[0], "name": chosen[1]}]
    else:
        # No filter given: fetch and delete every known entity.
        entities = self.users()
        to_delete = [{"type": entity["type"], "name": entity["name"]} for entity in entities["results"]]

    params = self._prepare_params()
    if not to_delete:
        raise ValueError("No entities to delete")

    # Delete sequentially, failing fast on the first error response.
    for entity in to_delete:
        response = self.client.delete(f"/v2/entities/{entity['type']}/{entity['name']}/", params=params)
        response.raise_for_status()

    capture_client_event(
        "client.delete_users",
        self,
        {
            "user_id": user_id,
            "agent_id": agent_id,
            "app_id": app_id,
            "run_id": run_id,
            "sync_type": "sync",
        },
    )

    return {
        "message": "Entity deleted successfully."
        if (user_id or agent_id or app_id or run_id)
        else "All users, agents, apps and runs deleted."
    }
@api_error_handler
def reset(self) -> Dict[str, str]:
    """Wipe the account: delete all users, agents, sessions and memories.

    Returns:
        Dict[str, str]: Success message confirming the reset.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        MemoryQuotaExceededError: If memory quota is exceeded.
        NetworkError: If network connectivity issues occur.
        MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
    """
    # delete_users() with no filters cascades to every entity type.
    self.delete_users()
    capture_client_event("client.reset", self, {"sync_type": "sync"})
    return {"message": "Client reset successful. All users and memories deleted."}
@api_error_handler
def batch_update(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Update several memories in a single request.

    Args:
        memories: Update descriptors; each dict must carry ``memory_id``
            plus an optional new ``text`` and/or ``metadata``.

    Returns:
        Dict[str, Any]: The server's response payload.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        MemoryQuotaExceededError: If memory quota is exceeded.
        NetworkError: If network connectivity issues occur.
        MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
    """
    payload = {"memories": memories}
    resp = self.client.put("/v1/batch/", json=payload)
    resp.raise_for_status()
    capture_client_event("client.batch_update", self, {"sync_type": "sync"})
    return resp.json()
@api_error_handler
def batch_delete(self, memories: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Delete several memories in a single request.

    Args:
        memories: Delete descriptors; each dict must carry ``memory_id``.

    Returns:
        Dict[str, Any]: The server's response payload.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        MemoryQuotaExceededError: If memory quota is exceeded.
        NetworkError: If network connectivity issues occur.
        MemoryNotFoundError: If the memory doesn't exist (for updates/deletes).
    """
    payload = {"memories": memories}
    # A DELETE with a JSON body needs the generic request() API.
    resp = self.client.request("DELETE", "/v1/batch/", json=payload)
    resp.raise_for_status()
    capture_client_event("client.batch_delete", self, {"sync_type": "sync"})
    return resp.json()
@api_error_handler
def create_memory_export(self, schema: str, **kwargs) -> Dict[str, Any]:
    """Request a memory export built from the given JSON schema.

    Args:
        schema: JSON schema describing the export structure.
        **kwargs: Optional filters such as user_id, run_id, etc.

    Returns:
        Dict containing the export request ID and a status message.
    """
    body = {"schema": schema}
    body.update(self._prepare_params(kwargs))
    resp = self.client.post("/v1/exports/", json=body)
    resp.raise_for_status()
    capture_client_event(
        "client.create_memory_export",
        self,
        {
            "schema": schema,
            "keys": list(kwargs.keys()),
            "sync_type": "sync",
        },
    )
    return resp.json()
@api_error_handler
def get_memory_export(self, **kwargs) -> Dict[str, Any]:
    """Retrieve a previously requested memory export.

    Args:
        **kwargs: Filters such as user_id selecting a specific export.

    Returns:
        Dict containing the exported data.
    """
    filters = self._prepare_params(kwargs)
    resp = self.client.post("/v1/exports/get/", json=filters)
    resp.raise_for_status()
    capture_client_event(
        "client.get_memory_export",
        self,
        {"keys": list(kwargs.keys()), "sync_type": "sync"},
    )
    return resp.json()
@api_error_handler
def get_summary(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Fetch the summary of a memory export.

    Args:
        filters: Optional filters to apply to the summary request.

    Returns:
        Dict containing the export status and summary data.
    """
    body = self._prepare_params({"filters": filters})
    resp = self.client.post("/v1/summary/", json=body)
    resp.raise_for_status()
    capture_client_event("client.get_summary", self, {"sync_type": "sync"})
    return resp.json()
@api_error_handler
def get_project(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
    """Fetch selected fields (instructions, categories, ...) of the project.

    Deprecated in favor of ``client.project.get()``; a warning is logged on
    every call.

    Args:
        fields: List of fields to retrieve.

    Returns:
        Dictionary containing the requested fields.

    Raises:
        ValueError: If org_id or project_id are not set.
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    logger.warning(
        "get_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.get() method instead."
    )
    if not (self.org_id and self.project_id):
        raise ValueError("org_id and project_id must be set to access instructions or categories")
    project_url = f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/"
    resp = self.client.get(project_url, params=self._prepare_params({"fields": fields}))
    resp.raise_for_status()
    capture_client_event(
        "client.get_project_details",
        self,
        {"fields": fields, "sync_type": "sync"},
    )
    return resp.json()
@api_error_handler
def update_project(
    self,
    custom_instructions: Optional[str] = None,
    custom_categories: Optional[List[str]] = None,
    retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
    enable_graph: Optional[bool] = None,
    version: Optional[str] = None,
) -> Dict[str, Any]:
    """Update the project settings.

    Deprecated in favor of ``client.project.update()``; a warning is logged
    on every call.

    Args:
        custom_instructions: New instructions for the project.
        custom_categories: New categories for the project.
        retrieval_criteria: New retrieval criteria for the project.
        enable_graph: Enable or disable the graph for the project.
        version: Version of the project.

    Returns:
        Dictionary containing the API response.

    Raises:
        ValueError: If org_id or project_id are not set, or if no update
            parameter is provided.
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    logger.warning(
        "update_project() method is going to be deprecated in version v1.0 of the package. Please use the client.project.update() method instead."
    )
    if not (self.org_id and self.project_id):
        raise ValueError("org_id and project_id must be set to update instructions or categories")
    if (
        custom_instructions is None
        and custom_categories is None
        and retrieval_criteria is None
        and enable_graph is None
        and version is None
    ):
        # Fixed message: the previous text omitted enable_graph and version
        # even though either one alone satisfies this guard.
        raise ValueError(
            "You must provide at least one of: custom_instructions, "
            "custom_categories, retrieval_criteria, enable_graph, version"
        )
    payload = self._prepare_params(
        {
            "custom_instructions": custom_instructions,
            "custom_categories": custom_categories,
            "retrieval_criteria": retrieval_criteria,
            "enable_graph": enable_graph,
            "version": version,
        }
    )
    response = self.client.patch(
        f"/api/v1/orgs/organizations/{self.org_id}/projects/{self.project_id}/",
        json=payload,
    )
    response.raise_for_status()
    capture_client_event(
        "client.update_project",
        self,
        {
            "custom_instructions": custom_instructions,
            "custom_categories": custom_categories,
            "retrieval_criteria": retrieval_criteria,
            "enable_graph": enable_graph,
            "version": version,
            "sync_type": "sync",
        },
    )
    return response.json()
def chat(self):
    """Interactive chat with the Mem0 AI — placeholder, not yet available.

    Raises:
        NotImplementedError: Always; the feature is not implemented.
    """
    raise NotImplementedError("Chat is not implemented yet")
@api_error_handler
def get_webhooks(self, project_id: str) -> Dict[str, Any]:
    """List the webhook configurations registered for a project.

    Args:
        project_id: The ID of the project to get webhooks for.

    Returns:
        Dictionary containing webhook details.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    # NOTE(review): this path lacks the leading slash the other endpoints
    # use; httpx resolves both against base_url, but confirm it's intended.
    resp = self.client.get(f"api/v1/webhooks/projects/{project_id}/")
    resp.raise_for_status()
    capture_client_event("client.get_webhook", self, {"sync_type": "sync"})
    return resp.json()
@api_error_handler
def create_webhook(self, url: str, name: str, project_id: str, event_types: List[str]) -> Dict[str, Any]:
    """Register a webhook on the given project.

    Args:
        url: Destination URL for webhook deliveries.
        name: Human-readable name of the webhook.
        project_id: The project to attach the webhook to.
        event_types: Event types that trigger the webhook.

    Returns:
        Dictionary containing the created webhook details.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    body = {"url": url, "name": name, "event_types": event_types}
    resp = self.client.post(f"api/v1/webhooks/projects/{project_id}/", json=body)
    resp.raise_for_status()
    capture_client_event("client.create_webhook", self, {"sync_type": "sync"})
    return resp.json()
@api_error_handler
def update_webhook(
    self,
    webhook_id: int,
    name: Optional[str] = None,
    url: Optional[str] = None,
    event_types: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Update a webhook configuration; only non-None fields are sent.

    Args:
        webhook_id: ID of the webhook to update.
        name: Optional new name for the webhook.
        url: Optional new URL for the webhook.
        event_types: Optional new list of triggering event types.

    Returns:
        Dictionary containing the updated webhook details.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    candidate_fields = {"name": name, "url": url, "event_types": event_types}
    # Drop untouched fields so the server only sees actual changes.
    payload = {field: value for field, value in candidate_fields.items() if value is not None}
    resp = self.client.put(f"api/v1/webhooks/{webhook_id}/", json=payload)
    resp.raise_for_status()
    capture_client_event("client.update_webhook", self, {"webhook_id": webhook_id, "sync_type": "sync"})
    return resp.json()
@api_error_handler
def delete_webhook(self, webhook_id: int) -> Dict[str, str]:
    """Delete a webhook configuration.

    Args:
        webhook_id: ID of the webhook to delete.

    Returns:
        Dictionary containing a success message.

    Raises:
        ValidationError: If the input data is invalid.
        AuthenticationError: If authentication fails.
        RateLimitError: If rate limits are exceeded.
        NetworkError: If network connectivity issues occur.
    """
    resp = self.client.delete(f"api/v1/webhooks/{webhook_id}/")
    resp.raise_for_status()
    capture_client_event(
        "client.delete_webhook",
        self,
        {"webhook_id": webhook_id, "sync_type": "sync"},
    )
    return resp.json()
@api_error_handler
def feedback(
self,
memory_id: str,
feedback: Optional[str] = None,
feedback_reason: Optional[str] = None,
) -> Dict[str, str]:
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | true |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/client/utils.py | mem0/client/utils.py | import json
import logging
import httpx
from mem0.exceptions import (
NetworkError,
create_exception_from_response,
)
logger = logging.getLogger(__name__)
class APIError(Exception):
    """Generic API error (deprecated).

    Retained only for backward compatibility; prefer the specific
    exception classes exposed by ``mem0.exceptions``.
    """

    pass
def api_error_handler(func):
    """Decorator that converts transport errors into structured exceptions.

    HTTP status errors become the most specific exception that
    ``create_exception_from_response`` can build (including rate-limit
    metadata for 429 responses); lower-level request failures become
    :class:`NetworkError` variants.  The original ``httpx`` exception is
    chained via ``raise ... from`` so the full traceback is preserved —
    previously the chain was implicit-only, which obscured the root cause.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            # Extract error details from the response.
            response_text = ""
            error_details = {}
            debug_info = {
                "status_code": e.response.status_code,
                "url": str(e.request.url),
                "method": e.request.method,
            }
            try:
                response_text = e.response.text
                # Prefer a structured JSON error body when the server sent one.
                if e.response.headers.get("content-type", "").startswith("application/json"):
                    error_data = json.loads(response_text)
                    if isinstance(error_data, dict):
                        error_details = error_data
                        response_text = error_data.get("detail", response_text)
            except (json.JSONDecodeError, AttributeError):
                # Not JSON (or malformed): keep the raw response text.
                pass
            # Surface rate-limit metadata so callers can back off sensibly.
            if e.response.status_code == 429:
                retry_after = e.response.headers.get("Retry-After")
                if retry_after:
                    try:
                        debug_info["retry_after"] = int(retry_after)
                    except ValueError:
                        pass
                for header in ["X-RateLimit-Limit", "X-RateLimit-Remaining", "X-RateLimit-Reset"]:
                    value = e.response.headers.get(header)
                    if value:
                        debug_info[header.lower().replace("-", "_")] = value
            # Create the most specific exception for this status code.
            exception = create_exception_from_response(
                status_code=e.response.status_code,
                response_text=response_text,
                details=error_details,
                debug_info=debug_info,
            )
            raise exception from e
        except httpx.RequestError as e:
            logger.error(f"Request error occurred: {e}")
            # Map the transport-level failure to the matching NetworkError.
            if isinstance(e, httpx.TimeoutException):
                raise NetworkError(
                    message=f"Request timed out: {str(e)}",
                    error_code="NET_TIMEOUT",
                    suggestion="Please check your internet connection and try again",
                    debug_info={"error_type": "timeout", "original_error": str(e)},
                ) from e
            elif isinstance(e, httpx.ConnectError):
                raise NetworkError(
                    message=f"Connection failed: {str(e)}",
                    error_code="NET_CONNECT",
                    suggestion="Please check your internet connection and try again",
                    debug_info={"error_type": "connection", "original_error": str(e)},
                ) from e
            else:
                # Generic network error for any other request failure.
                raise NetworkError(
                    message=f"Network request failed: {str(e)}",
                    error_code="NET_GENERIC",
                    suggestion="Please check your internet connection and try again",
                    debug_info={"error_type": "request", "original_error": str(e)},
                ) from e

    return wrapper
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/client/__init__.py | mem0/client/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/client/project.py | mem0/client/project.py | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
import httpx
from pydantic import BaseModel, ConfigDict, Field
from mem0.client.utils import api_error_handler
from mem0.memory.telemetry import capture_client_event
# Exception classes are referenced in docstrings only
logger = logging.getLogger(__name__)
class ProjectConfig(BaseModel):
    """
    Configuration for project management operations.

    Carries the identifiers that scope project API calls. All fields are
    optional; values are re-validated on assignment and unknown fields
    are rejected.
    """

    # Organization that owns the project.
    org_id: Optional[str] = Field(default=None, description="Organization ID")
    # Project within the organization.
    project_id: Optional[str] = Field(default=None, description="Project ID")
    # Email of the acting user, if any.
    user_email: Optional[str] = Field(default=None, description="User email")

    # validate_assignment: re-run validation when attributes are set;
    # extra="forbid": reject unexpected constructor keywords.
    model_config = ConfigDict(validate_assignment=True, extra="forbid")
class BaseProject(ABC):
    """
    Abstract base class for project management operations.

    Holds the HTTP client and a :class:`ProjectConfig`, and provides the
    parameter-preparation helpers shared by the sync and async managers.
    Concrete subclasses implement the actual API calls.
    """

    def __init__(
        self,
        client: Any,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """
        Initialize the project manager.

        Args:
            client: HTTP client instance.
            config: Pre-built configuration; when given it takes precedence
                over the individual keyword arguments.
            org_id: Organization ID.
            project_id: Project ID.
            user_email: User email.
        """
        self._client = client
        if config is not None:
            self.config = config
        else:
            # Build the config from the individual keyword arguments.
            self.config = ProjectConfig(org_id=org_id, project_id=project_id, user_email=user_email)

    @property
    def org_id(self) -> Optional[str]:
        """Get the organization ID."""
        return self.config.org_id

    @property
    def project_id(self) -> Optional[str]:
        """Get the project ID."""
        return self.config.project_id

    @property
    def user_email(self) -> Optional[str]:
        """Get the user email."""
        return self.config.user_email

    def _validate_org_project(self) -> None:
        """
        Validate that both org_id and project_id are set.

        Raises:
            ValueError: If org_id or project_id are not set.
        """
        if not (self.config.org_id and self.config.project_id):
            raise ValueError("org_id and project_id must be set to access project operations")

    def _prepare_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Prepare query parameters for API requests.

        The input mapping is copied before org_id/project_id are added, so
        the caller's dict is never mutated (the previous implementation
        wrote into it directly). ``None`` values are dropped.

        Args:
            kwargs: Additional keyword arguments.

        Returns:
            Dictionary containing prepared parameters.

        Raises:
            ValueError: If only one of org_id / project_id is configured.
        """
        params = dict(kwargs) if kwargs else {}
        if self.config.org_id and self.config.project_id:
            params["org_id"] = self.config.org_id
            params["project_id"] = self.config.project_id
        elif self.config.org_id or self.config.project_id:
            raise ValueError("Please provide both org_id and project_id")
        return {k: v for k, v in params.items() if v is not None}

    def _prepare_org_params(self, kwargs: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Prepare query parameters for organization-level API requests.

        The input mapping is copied, never mutated. ``None`` values are
        dropped.

        Args:
            kwargs: Additional keyword arguments.

        Returns:
            Dictionary containing prepared parameters.

        Raises:
            ValueError: If org_id is not provided.
        """
        params = dict(kwargs) if kwargs else {}
        if self.config.org_id:
            params["org_id"] = self.config.org_id
        else:
            raise ValueError("org_id must be set for organization-level operations")
        return {k: v for k, v in params.items() if v is not None}

    @abstractmethod
    def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Get project details.

        Args:
            fields: List of fields to retrieve.

        Returns:
            Dictionary containing the requested project fields.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a new project within the organization.

        Args:
            name: Name of the project to be created.
            description: Optional description for the project.

        Returns:
            Dictionary containing the created project details.

        Raises:
            ValueError: If org_id is not set.
        """

    @abstractmethod
    def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Update project settings.

        Args:
            custom_instructions: New instructions for the project.
            custom_categories: New categories for the project.
            retrieval_criteria: New retrieval criteria for the project.
            enable_graph: Enable or disable the graph for the project.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def delete(self) -> Dict[str, Any]:
        """
        Delete the current project and its related data.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def get_members(self) -> Dict[str, Any]:
        """
        Get all members of the current project.

        Returns:
            Dictionary containing the list of project members.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """
        Add a new member to the current project.

        Args:
            email: Email address of the user to add.
            role: Role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """
        Update a member's role in the current project.

        Args:
            email: Email address of the user to update.
            role: New role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If org_id or project_id are not set.
        """

    @abstractmethod
    def remove_member(self, email: str) -> Dict[str, Any]:
        """
        Remove a member from the current project.

        Args:
            email: Email address of the user to remove.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If org_id or project_id are not set.
        """
class Project(BaseProject):
    """
    Synchronous project management operations.

    Thin wrappers around the organization/project REST endpoints: every
    call raises on HTTP errors and records a telemetry event.
    """

    def __init__(
        self,
        client: httpx.Client,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """
        Initialize the synchronous project manager.

        Args:
            client: HTTP client instance.
            config: Project manager configuration.
            org_id: Organization ID.
            project_id: Project ID.
            user_email: User email.

        Raises:
            ValueError: If org_id or project_id are not set.
        """
        super().__init__(client, config, org_id, project_id, user_email)
        self._validate_org_project()

    @api_error_handler
    def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Get project details.

        Args:
            fields: List of fields to retrieve.

        Returns:
            Dictionary containing the requested project fields.
        """
        query = self._prepare_params({"fields": fields})
        resp = self._client.get(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
            params=query,
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.get",
            self,
            {"fields": fields, "sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a new project within the organization.

        Args:
            name: Name of the project to be created.
            description: Optional description for the project.

        Returns:
            Dictionary containing the created project details.

        Raises:
            ValueError: If org_id is not set.
        """
        if not self.config.org_id:
            raise ValueError("org_id must be set to create a project")
        body = {"name": name}
        if description is not None:
            body["description"] = description
        resp = self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
            json=body,
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.create",
            self,
            {"name": name, "description": description, "sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Update project settings; at least one argument must be non-None.

        Args:
            custom_instructions: New instructions for the project.
            custom_categories: New categories for the project.
            retrieval_criteria: New retrieval criteria for the project.
            enable_graph: Enable or disable the graph for the project.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If no update parameter is provided.
        """
        if all(
            value is None
            for value in (custom_instructions, custom_categories, retrieval_criteria, enable_graph)
        ):
            raise ValueError(
                "At least one parameter must be provided for update: "
                "custom_instructions, custom_categories, retrieval_criteria, "
                "enable_graph"
            )
        payload = self._prepare_params(
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
            }
        )
        resp = self._client.patch(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
            json=payload,
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.update",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "sync_type": "sync",
            },
        )
        return resp.json()

    @api_error_handler
    def delete(self) -> Dict[str, Any]:
        """
        Delete the current project and its related data.

        Returns:
            Dictionary containing the API response.
        """
        resp = self._client.delete(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.delete",
            self,
            {"sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def get_members(self) -> Dict[str, Any]:
        """
        Get all members of the current project.

        Returns:
            Dictionary containing the list of project members.
        """
        resp = self._client.get(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.get_members",
            self,
            {"sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """
        Add a new member to the current project.

        Args:
            email: Email address of the user to add.
            role: Role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        body = {"email": email, "role": role}
        resp = self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            json=body,
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.add_member",
            self,
            {"email": email, "role": role, "sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """
        Update a member's role in the current project.

        Args:
            email: Email address of the user to update.
            role: New role to assign ("READER" or "OWNER").

        Returns:
            Dictionary containing the API response.

        Raises:
            ValueError: If the role is not "READER" or "OWNER".
        """
        if role not in ("READER", "OWNER"):
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        body = {"email": email, "role": role}
        resp = self._client.put(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            json=body,
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.update_member",
            self,
            {"email": email, "role": role, "sync_type": "sync"},
        )
        return resp.json()

    @api_error_handler
    def remove_member(self, email: str) -> Dict[str, Any]:
        """
        Remove a member from the current project.

        Args:
            email: Email address of the user to remove.

        Returns:
            Dictionary containing the API response.
        """
        resp = self._client.delete(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            params={"email": email},
        )
        resp.raise_for_status()
        capture_client_event(
            "client.project.remove_member",
            self,
            {"email": email, "sync_type": "sync"},
        )
        return resp.json()
class AsyncProject(BaseProject):
    """
    Asynchronous project management operations.
    """

    def __init__(
        self,
        client: httpx.AsyncClient,
        config: Optional[ProjectConfig] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        user_email: Optional[str] = None,
    ):
        """
        Initialize the asynchronous project manager.

        Args:
            client: HTTP client instance
            config: Project manager configuration
            org_id: Organization ID
            project_id: Project ID
            user_email: User email
        """
        super().__init__(client, config, org_id, project_id, user_email)
        # Fail fast at construction time if org/project identifiers are
        # missing (validator inherited from BaseProject).
        self._validate_org_project()

    @api_error_handler
    async def get(self, fields: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Get project details.

        Args:
            fields: List of fields to retrieve

        Returns:
            Dictionary containing the requested project fields.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        # _prepare_params presumably drops None-valued entries so an omitted
        # `fields` is not sent — defined on BaseProject; TODO confirm.
        params = self._prepare_params({"fields": fields})
        response = await self._client.get(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
            params=params,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.get",
            self,
            {"fields": fields, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def create(self, name: str, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a new project within the organization.

        Args:
            name: Name of the project to be created
            description: Optional description for the project

        Returns:
            Dictionary containing the created project details.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id is not set.
        """
        # Creation is scoped to the organization only; project_id is what
        # this call produces, so only org_id is required here.
        if not self.config.org_id:
            raise ValueError("org_id must be set to create a project")
        payload = {"name": name}
        if description is not None:
            payload["description"] = description
        response = await self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.create",
            self,
            {"name": name, "description": description, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def update(
        self,
        custom_instructions: Optional[str] = None,
        custom_categories: Optional[List[str]] = None,
        retrieval_criteria: Optional[List[Dict[str, Any]]] = None,
        enable_graph: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Update project settings.

        Args:
            custom_instructions: New instructions for the project
            custom_categories: New categories for the project
            retrieval_criteria: New retrieval criteria for the project
            enable_graph: Enable or disable the graph for the project

        Returns:
            Dictionary containing the API response.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        # Reject a no-op update up front rather than sending an empty PATCH.
        if (
            custom_instructions is None
            and custom_categories is None
            and retrieval_criteria is None
            and enable_graph is None
        ):
            raise ValueError(
                "At least one parameter must be provided for update: "
                "custom_instructions, custom_categories, retrieval_criteria, "
                "enable_graph"
            )
        payload = self._prepare_params(
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
            }
        )
        response = await self._client.patch(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.update",
            self,
            {
                "custom_instructions": custom_instructions,
                "custom_categories": custom_categories,
                "retrieval_criteria": retrieval_criteria,
                "enable_graph": enable_graph,
                "sync_type": "async",
            },
        )
        return response.json()

    @api_error_handler
    async def delete(self) -> Dict[str, Any]:
        """
        Delete the current project and its related data.

        Returns:
            Dictionary containing the API response.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        response = await self._client.delete(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/",
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.delete",
            self,
            {"sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def get_members(self) -> Dict[str, Any]:
        """
        Get all members of the current project.

        Returns:
            Dictionary containing the list of project members.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        response = await self._client.get(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.get_members",
            self,
            {"sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def add_member(self, email: str, role: str = "READER") -> Dict[str, Any]:
        """
        Add a new member to the current project.

        Args:
            email: Email address of the user to add
            role: Role to assign ("READER" or "OWNER")

        Returns:
            Dictionary containing the API response.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        # Validate locally so an obviously bad role never reaches the API.
        if role not in ["READER", "OWNER"]:
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        payload = {"email": email, "role": role}
        response = await self._client.post(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.add_member",
            self,
            {"email": email, "role": role, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def update_member(self, email: str, role: str) -> Dict[str, Any]:
        """
        Update a member's role in the current project.

        Args:
            email: Email address of the user to update
            role: New role to assign ("READER" or "OWNER")

        Returns:
            Dictionary containing the API response.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        if role not in ["READER", "OWNER"]:
            raise ValueError("Role must be either 'READER' or 'OWNER'")
        payload = {"email": email, "role": role}
        # PUT (not POST): the membership already exists and is being replaced.
        response = await self._client.put(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            json=payload,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.update_member",
            self,
            {"email": email, "role": role, "sync_type": "async"},
        )
        return response.json()

    @api_error_handler
    async def remove_member(self, email: str) -> Dict[str, Any]:
        """
        Remove a member from the current project.

        Args:
            email: Email address of the user to remove

        Returns:
            Dictionary containing the API response.

        Raises:
            ValidationError: If the input data is invalid.
            AuthenticationError: If authentication fails.
            RateLimitError: If rate limits are exceeded.
            NetworkError: If network connectivity issues occur.
            ValueError: If org_id or project_id are not set.
        """
        # The member is identified via a query parameter on the DELETE call.
        params = {"email": email}
        response = await self._client.delete(
            f"/api/v1/orgs/organizations/{self.config.org_id}/projects/{self.config.project_id}/members/",
            params=params,
        )
        response.raise_for_status()
        capture_client_event(
            "client.project.remove_member",
            self,
            {"email": email, "sync_type": "async"},
        )
        return response.json()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/ollama.py | mem0/embeddings/ollama.py | import subprocess
import sys
from typing import Literal, Optional
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
# Import-time dependency check: if the optional `ollama` package is missing,
# offer to install it into the running interpreter.
# NOTE(review): calling input() and sys.exit() at import time will hang or
# kill non-interactive environments (CI, servers). Consider raising
# ImportError instead — confirm before changing, since the interactive
# prompt is existing behavior.
try:
    from ollama import Client
except ImportError:
    user_input = input("The 'ollama' library is required. Install it now? [y/N]: ")
    if user_input.lower() == "y":
        try:
            # Install into the same interpreter that is executing this module.
            subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama"])
            from ollama import Client
        except subprocess.CalledProcessError:
            print("Failed to install 'ollama'. Please install it manually using 'pip install ollama'.")
            sys.exit(1)
    else:
        print("The required 'ollama' library is not installed.")
        sys.exit(1)
class OllamaEmbedding(EmbeddingBase):
    """Embedding backend that talks to a local/remote Ollama server."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        # Fill in defaults for anything the caller left unset.
        self.config.model = self.config.model or "nomic-embed-text"
        self.config.embedding_dims = self.config.embedding_dims or 512
        self.client = Client(host=self.config.ollama_base_url)
        self._ensure_model_exists()

    def _ensure_model_exists(self):
        """
        Ensure the specified model exists locally. If not, pull it from Ollama.
        """
        wanted = self.config.model
        # Installed models may expose their identifier under "name" or
        # "model" depending on the client version, so check both keys.
        for entry in self.client.list()["models"]:
            if wanted in (entry.get("name"), entry.get("model")):
                return
        self.client.pull(wanted)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Ollama.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        result = self.client.embeddings(model=self.config.model, prompt=text)
        return result["embedding"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/aws_bedrock.py | mem0/embeddings/aws_bedrock.py | import json
import os
from typing import Literal, Optional
try:
import boto3
except ImportError:
raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
import numpy as np
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class AWSBedrockEmbedding(EmbeddingBase):
    """AWS Bedrock embedding implementation.

    This class uses AWS Bedrock's embedding models.
    """

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "amazon.titan-embed-text-v1"

        # Get AWS config from environment variables or use defaults
        aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID", "")
        aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
        aws_session_token = os.environ.get("AWS_SESSION_TOKEN", "")

        # Explicit values on the config object take precedence over env vars.
        if hasattr(self.config, "aws_access_key_id"):
            aws_access_key = self.config.aws_access_key_id
        if hasattr(self.config, "aws_secret_access_key"):
            aws_secret_key = self.config.aws_secret_access_key

        # AWS region is always set in config - see BaseEmbedderConfig
        aws_region = self.config.aws_region or "us-west-2"

        # Passing None lets boto3 fall back to its own credential chain
        # (profiles, instance roles, etc.).
        self.client = boto3.client(
            "bedrock-runtime",
            region_name=aws_region,
            aws_access_key_id=aws_access_key if aws_access_key else None,
            aws_secret_access_key=aws_secret_key if aws_secret_key else None,
            aws_session_token=aws_session_token if aws_session_token else None,
        )

    def _normalize_vector(self, embeddings):
        """Normalize the embedding to a unit vector.

        A zero vector is returned unchanged: dividing by its (zero) norm
        would previously have produced NaNs.
        """
        emb = np.array(embeddings)
        norm = np.linalg.norm(emb)
        if norm == 0:
            # Cannot normalize a zero vector; return it as-is.
            return emb.tolist()
        return (emb / norm).tolist()

    def _get_embedding(self, text):
        """Call out to Bedrock embedding endpoint.

        Builds the provider-specific request body (Cohere models use a
        different schema from Amazon Titan), invokes the model, and extracts
        the embedding from the provider-specific response shape.

        Raises:
            ValueError: If the Bedrock invocation fails for any reason.
        """
        # The provider is encoded as the first dotted segment of the model id,
        # e.g. "amazon.titan-embed-text-v1" -> "amazon".
        provider = self.config.model.split(".")[0]
        input_body = {}

        if provider == "cohere":
            input_body["input_type"] = "search_document"
            input_body["texts"] = [text]
        else:
            # Amazon and other providers
            input_body["inputText"] = text

        body = json.dumps(input_body)

        try:
            response = self.client.invoke_model(
                body=body,
                modelId=self.config.model,
                accept="application/json",
                contentType="application/json",
            )
            response_body = json.loads(response.get("body").read())

            if provider == "cohere":
                embeddings = response_body.get("embeddings")[0]
            else:
                embeddings = response_body.get("embedding")

            return embeddings
        except Exception as e:
            raise ValueError(f"Error getting embedding from AWS Bedrock: {e}")

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using AWS Bedrock.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        return self._get_embedding(text)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/together.py | mem0/embeddings/together.py | import os
from typing import Literal, Optional
from together import Together
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class TogetherEmbedding(EmbeddingBase):
    """Embedding backend for the Together AI embeddings API."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "togethercomputer/m2-bert-80M-8k-retrieval"
        # TODO: check if this is correct
        self.config.embedding_dims = self.config.embedding_dims or 768
        key = self.config.api_key or os.getenv("TOGETHER_API_KEY")
        self.client = Together(api_key=key)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Together.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        response = self.client.embeddings.create(model=self.config.model, input=text)
        return response.data[0].embedding
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/configs.py | mem0/embeddings/configs.py | from typing import Optional
from pydantic import BaseModel, Field, field_validator
class EmbedderConfig(BaseModel):
    """Top-level configuration selecting and configuring an embedding provider."""

    provider: str = Field(
        description="Provider of the embedding model (e.g., 'ollama', 'openai')",
        default="openai",
    )
    config: Optional[dict] = Field(description="Configuration for the specific embedding model", default={})

    @field_validator("config")
    def validate_config(cls, v, values):
        # Reject any provider outside the supported set; the config dict
        # itself is passed through untouched.
        supported_providers = {
            "openai",
            "ollama",
            "huggingface",
            "azure_openai",
            "gemini",
            "vertexai",
            "together",
            "lmstudio",
            "langchain",
            "aws_bedrock",
            "fastembed",
        }
        provider = values.data.get("provider")
        if provider not in supported_providers:
            raise ValueError(f"Unsupported embedding provider: {provider}")
        return v
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/lmstudio.py | mem0/embeddings/lmstudio.py | from typing import Literal, Optional
from openai import OpenAI
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class LMStudioEmbedding(EmbeddingBase):
    """Embedding backend for a local LM Studio server (OpenAI-compatible API)."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        # LM Studio defaults; the api_key value is a placeholder the local
        # server accepts.
        self.config.model = (
            self.config.model
            or "nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
        )
        self.config.embedding_dims = self.config.embedding_dims or 1536
        self.config.api_key = self.config.api_key or "lm-studio"
        self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using LM Studio.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        cleaned = text.replace("\n", " ")
        result = self.client.embeddings.create(input=[cleaned], model=self.config.model)
        return result.data[0].embedding
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/mock.py | mem0/embeddings/mock.py | from typing import Literal, Optional
from mem0.embeddings.base import EmbeddingBase
class MockEmbeddings(EmbeddingBase):
    """Deterministic stand-in embedder for tests."""

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Generate a mock embedding with dimension of 10.

        The input text is ignored; the same fixed vector is always returned.
        """
        return [i / 10 for i in range(1, 11)]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/huggingface.py | mem0/embeddings/huggingface.py | import logging
from typing import Literal, Optional
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
# Silence the chatty INFO/DEBUG output from the model-loading stack;
# warnings and errors still surface.
logging.getLogger("transformers").setLevel(logging.WARNING)
logging.getLogger("sentence_transformers").setLevel(logging.WARNING)
logging.getLogger("huggingface_hub").setLevel(logging.WARNING)
class HuggingFaceEmbedding(EmbeddingBase):
    """Embedding backend using either a remote TEI server or a local
    SentenceTransformer model, depending on configuration."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        # Read settings from self.config, never from the raw `config`
        # argument: the base class substitutes a default BaseEmbedderConfig
        # when config is None, so dereferencing the argument directly would
        # raise AttributeError for HuggingFaceEmbedding(). (`embed` below
        # already uses self.config for the same check.)
        if self.config.huggingface_base_url:
            # Remote mode: text-embeddings-inference server exposing an
            # OpenAI-compatible API.
            self.client = OpenAI(base_url=self.config.huggingface_base_url)
            self.config.model = self.config.model or "tei"
        else:
            # Local mode: load a SentenceTransformer model.
            self.config.model = self.config.model or "multi-qa-MiniLM-L6-cos-v1"
            self.model = SentenceTransformer(self.config.model, **self.config.model_kwargs)
            self.config.embedding_dims = self.config.embedding_dims or self.model.get_sentence_embedding_dimension()

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Hugging Face.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        if self.config.huggingface_base_url:
            return self.client.embeddings.create(
                input=text, model=self.config.model, **self.config.model_kwargs
            ).data[0].embedding
        else:
            return self.model.encode(text, convert_to_numpy=True).tolist()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/__init__.py | mem0/embeddings/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/vertexai.py | mem0/embeddings/vertexai.py | import os
from typing import Literal, Optional
from vertexai.language_models import TextEmbeddingInput, TextEmbeddingModel
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
from mem0.utils.gcp_auth import GCPAuthenticator
class VertexAIEmbedding(EmbeddingBase):
    """Embedding backend for Google Vertex AI text-embedding models."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "text-embedding-004"
        self.config.embedding_dims = self.config.embedding_dims or 256

        # Per-action task types; retrieval writes use RETRIEVAL_DOCUMENT,
        # searches use RETRIEVAL_QUERY, each overridable via config.
        self.embedding_types = {
            "add": self.config.memory_add_embedding_type or "RETRIEVAL_DOCUMENT",
            "update": self.config.memory_update_embedding_type or "RETRIEVAL_DOCUMENT",
            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY",
        }

        # Set up authentication using centralized GCP authenticator
        # This supports multiple authentication methods while preserving environment variable support
        try:
            GCPAuthenticator.setup_vertex_ai(
                service_account_json=getattr(self.config, 'google_service_account_json', None),
                credentials_path=self.config.vertex_credentials_json,
                project_id=getattr(self.config, 'google_project_id', None)
            )
        except Exception:
            # Fall back to original behavior for backward compatibility:
            # point GOOGLE_APPLICATION_CREDENTIALS at the configured JSON, or
            # fail loudly if neither the config nor the environment has it.
            credentials_path = self.config.vertex_credentials_json
            if credentials_path:
                os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials_path
            elif not os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
                raise ValueError(
                    "Google application credentials JSON is not provided. Please provide a valid JSON path or set the 'GOOGLE_APPLICATION_CREDENTIALS' environment variable."
                )

        self.model = TextEmbeddingModel.from_pretrained(self.config.model)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Vertex AI.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        # Without a memory_action the generic similarity task type is used.
        embedding_type = "SEMANTIC_SIMILARITY"
        if memory_action is not None:
            if memory_action not in self.embedding_types:
                raise ValueError(f"Invalid memory action: {memory_action}")

            embedding_type = self.embedding_types[memory_action]

        text_input = TextEmbeddingInput(text=text, task_type=embedding_type)
        embeddings = self.model.get_embeddings(texts=[text_input], output_dimensionality=self.config.embedding_dims)

        return embeddings[0].values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/azure_openai.py | mem0/embeddings/azure_openai.py | import os
from typing import Literal, Optional
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
SCOPE = "https://cognitiveservices.azure.com/.default"
class AzureOpenAIEmbedding(EmbeddingBase):
    """Embedding backend for Azure OpenAI deployments.

    Falls back to Azure AD token authentication (DefaultAzureCredential)
    when no usable API key is configured.
    """

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        azure = self.config.azure_kwargs
        api_key = azure.api_key or os.getenv("EMBEDDING_AZURE_OPENAI_API_KEY")
        azure_deployment = azure.azure_deployment or os.getenv("EMBEDDING_AZURE_DEPLOYMENT")
        azure_endpoint = azure.azure_endpoint or os.getenv("EMBEDDING_AZURE_ENDPOINT")
        api_version = azure.api_version or os.getenv("EMBEDDING_AZURE_API_VERSION")
        default_headers = azure.default_headers

        # If the API key is not provided or is a placeholder, use DefaultAzureCredential.
        azure_ad_token_provider = None
        if not api_key or api_key == "your-api-key":
            self.credential = DefaultAzureCredential()
            azure_ad_token_provider = get_bearer_token_provider(self.credential, SCOPE)
            api_key = None

        self.client = AzureOpenAI(
            azure_deployment=azure_deployment,
            azure_endpoint=azure_endpoint,
            azure_ad_token_provider=azure_ad_token_provider,
            api_version=api_version,
            api_key=api_key,
            http_client=self.config.http_client,
            default_headers=default_headers,
        )

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Azure OpenAI.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        cleaned = text.replace("\n", " ")
        return self.client.embeddings.create(input=[cleaned], model=self.config.model).data[0].embedding
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/base.py | mem0/embeddings/base.py | from abc import ABC, abstractmethod
from typing import Literal, Optional
from mem0.configs.embeddings.base import BaseEmbedderConfig
class EmbeddingBase(ABC):
    """Abstract base class for all embedding backends.

    :param config: Embedding configuration option class, defaults to None
    :type config: Optional[BaseEmbedderConfig], optional
    """

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        # Substitute a default configuration when the caller supplies none.
        self.config = BaseEmbedderConfig() if config is None else config

    @abstractmethod
    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]]):
        """
        Get the embedding for the given text.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/fastembed.py | mem0/embeddings/fastembed.py | from typing import Optional, Literal
from mem0.embeddings.base import EmbeddingBase
from mem0.configs.embeddings.base import BaseEmbedderConfig
try:
from fastembed import TextEmbedding
except ImportError:
raise ImportError("FastEmbed is not installed. Please install it using `pip install fastembed`")
class FastEmbedEmbedding(EmbeddingBase):
    """Embedding backend using FastEmbed's ONNX-runtime models."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "thenlper/gte-large"
        self.dense_model = TextEmbedding(model_name=self.config.model)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Convert the text to embeddings using FastEmbed running in the Onnx runtime

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        cleaned = text.replace("\n", " ")
        # TextEmbedding.embed yields vectors lazily; take the first (only)
        # one for the single input string.
        # NOTE(review): the yielded value appears to be a numpy array rather
        # than a plain list — confirm against callers.
        return next(iter(self.dense_model.embed(cleaned)))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/langchain.py | mem0/embeddings/langchain.py | from typing import Literal, Optional
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
try:
from langchain.embeddings.base import Embeddings
except ImportError:
raise ImportError("langchain is not installed. Please install it using `pip install langchain`")
class LangchainEmbedding(EmbeddingBase):
    """Adapter that delegates embedding to a LangChain Embeddings instance."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        # Here `model` is not a model name but a ready LangChain Embeddings
        # object supplied by the caller.
        model = self.config.model
        if model is None:
            raise ValueError("`model` parameter is required")
        if not isinstance(model, Embeddings):
            raise ValueError("`model` must be an instance of Embeddings")
        self.langchain_model = model

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Langchain.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        return self.langchain_model.embed_query(text)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/openai.py | mem0/embeddings/openai.py | import os
import warnings
from typing import Literal, Optional
from openai import OpenAI
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class OpenAIEmbedding(EmbeddingBase):
    """Embedding backend for the OpenAI embeddings API."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "text-embedding-3-small"
        self.config.embedding_dims = self.config.embedding_dims or 1536

        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        # Base-URL resolution order: explicit config, deprecated env var,
        # current env var, public endpoint.
        base_url = (
            self.config.openai_base_url
            or os.getenv("OPENAI_API_BASE")
            or os.getenv("OPENAI_BASE_URL")
            or "https://api.openai.com/v1"
        )
        if os.environ.get("OPENAI_API_BASE"):
            warnings.warn(
                "The environment variable 'OPENAI_API_BASE' is deprecated and will be removed in the 0.1.80. "
                "Please use 'OPENAI_BASE_URL' instead.",
                DeprecationWarning,
            )
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using OpenAI.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        cleaned = text.replace("\n", " ")
        response = self.client.embeddings.create(
            input=[cleaned],
            model=self.config.model,
            dimensions=self.config.embedding_dims,
        )
        return response.data[0].embedding
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/mem0/embeddings/gemini.py | mem0/embeddings/gemini.py | import os
from typing import Literal, Optional
from google import genai
from google.genai import types
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase
class GoogleGenAIEmbedding(EmbeddingBase):
    """Embedding backend for Google Generative AI (google-genai SDK)."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.config.model = self.config.model or "models/text-embedding-004"
        self.config.embedding_dims = self.config.embedding_dims or self.config.output_dimensionality or 768
        key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
        self.client = genai.Client(api_key=key)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Google Generative AI.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.

        Returns:
            list: The embedding vector.
        """
        cleaned = text.replace("\n", " ")
        # Embedding parameters (output size) travel in a config object.
        embed_config = types.EmbedContentConfig(output_dimensionality=self.config.embedding_dims)
        result = self.client.models.embed_content(model=self.config.model, contents=cleaned, config=embed_config)
        return result.embeddings[0].values
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/client.py | embedchain/embedchain/client.py | import json
import logging
import os
import uuid
import requests
from embedchain.constants import CONFIG_DIR, CONFIG_FILE
logger = logging.getLogger(__name__)
class Client:
    """Manages the embedchain API key and anonymous user id stored in the
    local config file."""

    def __init__(self, api_key=None, host="https://apiv2.embedchain.ai"):
        """Validate and persist a provided API key, or load a saved one.

        Args:
            api_key: API key to validate and store; if omitted, the key is
                loaded from the config file.
            host: Base URL of the embedchain API.

        Raises:
            ValueError: If the provided key is invalid, or no key is
                provided and none is stored.
        """
        self.config_data = self.load_config()
        self.host = host

        if api_key:
            if self.check(api_key):
                self.api_key = api_key
                self.save()
            else:
                raise ValueError(
                    "Invalid API key provided. You can find your API key on https://app.embedchain.ai/settings/keys."
                )
        else:
            if "api_key" in self.config_data:
                self.api_key = self.config_data["api_key"]
                logger.info("API key loaded successfully!")
            else:
                raise ValueError(
                    "You are not logged in. Please obtain an API key from https://app.embedchain.ai/settings/keys/"
                )

    @classmethod
    def setup(cls):
        """
        Loads the user id from the config file if it exists, otherwise generates a new
        one and saves it to the config file.

        :return: user id
        :rtype: str
        """
        os.makedirs(CONFIG_DIR, exist_ok=True)
        data = {}
        if os.path.exists(CONFIG_FILE):
            with open(CONFIG_FILE, "r") as f:
                data = json.load(f)
            if "user_id" in data:
                return data["user_id"]

        u_id = str(uuid.uuid4())
        # Merge the new id into the existing config instead of overwriting
        # the whole file, so keys such as "api_key" survive; also return the
        # freshly generated id (the previous implementation returned None
        # on this path and dropped any other stored keys).
        data["user_id"] = u_id
        with open(CONFIG_FILE, "w") as f:
            json.dump(data, f)
        return u_id

    @classmethod
    def load_config(cls):
        """Return the parsed config file, creating it first if needed."""
        if not os.path.exists(CONFIG_FILE):
            cls.setup()

        with open(CONFIG_FILE, "r") as config_file:
            return json.load(config_file)

    def save(self):
        """Persist the current API key into the config file."""
        self.config_data["api_key"] = self.api_key
        with open(CONFIG_FILE, "w") as config_file:
            json.dump(self.config_data, config_file, indent=4)

        logger.info("API key saved successfully!")

    def clear(self):
        """Remove the stored API key from the config file, if present."""
        if "api_key" in self.config_data:
            del self.config_data["api_key"]
            with open(CONFIG_FILE, "w") as config_file:
                json.dump(self.config_data, config_file, indent=4)
            self.api_key = None
            logger.info("API key deleted successfully!")
        else:
            logger.warning("API key not found in the configuration file.")

    def update(self, api_key):
        """Replace the stored API key after validating the new one."""
        if self.check(api_key):
            self.api_key = api_key
            self.save()
            logger.info("API key updated successfully!")
        else:
            logger.warning("Invalid API key provided. API key not updated.")

    def check(self, api_key):
        """Validate an API key against the server; returns True if accepted."""
        validation_url = f"{self.host}/api/v1/accounts/api_keys/validate/"
        response = requests.post(validation_url, headers={"Authorization": f"Token {api_key}"})
        if response.status_code == 200:
            return True
        else:
            logger.warning(f"Response from API: {response.text}")
            logger.warning("Invalid API key. Unable to validate.")
            return False

    def get(self):
        """Return the current API key."""
        return self.api_key

    def __str__(self):
        return self.api_key
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/cli.py | embedchain/embedchain/cli.py | import json
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import zipfile
from pathlib import Path
import click
import requests
from rich.console import Console
from embedchain.telemetry.posthog import AnonymousTelemetry
from embedchain.utils.cli import (
deploy_fly,
deploy_gradio_app,
deploy_hf_spaces,
deploy_modal,
deploy_render,
deploy_streamlit,
get_pkg_path_from_name,
setup_fly_io_app,
setup_gradio_app,
setup_hf_app,
setup_modal_com_app,
setup_render_com_app,
setup_streamlit_io_app,
)
console = Console()
api_process = None
ui_process = None
anonymous_telemetry = AnonymousTelemetry()
def signal_handler(sig, frame):
    """Handle termination signals by shutting down any spawned server processes."""
    global api_process, ui_process
    console.print("\n🛑 [bold yellow]Stopping servers...[/bold yellow]")
    if api_process is not None:
        api_process.terminate()
        console.print("🛑 [bold yellow]API server stopped.[/bold yellow]")
    if ui_process is not None:
        ui_process.terminate()
        console.print("🛑 [bold yellow]UI server stopped.[/bold yellow]")
    sys.exit(0)
@click.group()
def cli():
    # Root click command group; subcommands below attach via @cli.command().
    # (Intentionally no docstring: click would surface it as --help text.)
    pass
@cli.command()
@click.argument("app_name")
@click.option("--docker", is_flag=True, help="Use docker to create the app.")
@click.pass_context
def create_app(ctx, app_name, docker):
    """Scaffold a new embedchain app named APP_NAME from the ec-admin template."""
    if Path(app_name).exists():
        console.print(
            f"❌ [red]Directory '{app_name}' already exists. Try using a new directory name, or remove it.[/red]"
        )
        return

    os.makedirs(app_name)
    os.chdir(app_name)

    # Step 1: Download the zip file (https avoids the insecure plain-HTTP hop)
    zip_url = "https://github.com/embedchain/ec-admin/archive/main.zip"
    console.print(f"Creating a new embedchain app in [green]{Path().resolve()}[/green]\n")
    try:
        response = requests.get(zip_url)
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            tmp_file.write(response.content)
            zip_file_path = tmp_file.name
        console.print("✅ [bold green]Fetched template successfully.[/bold green]")
    except requests.RequestException as e:
        console.print(f"❌ [bold red]Failed to download zip file: {e}[/bold red]")
        anonymous_telemetry.capture(event_name="ec_create_app", properties={"success": False})
        return

    # Step 2: Extract the zip file, stripping the archive's root directory
    try:
        with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
            # Get the name of the root directory inside the zip file
            root_dir = Path(zip_ref.namelist()[0])
            for member in zip_ref.infolist():
                # Build the path to extract the file to, skipping the root directory
                target_file = Path(member.filename).relative_to(root_dir)
                if member.is_dir():
                    # Create directory if it doesn't exist
                    os.makedirs(target_file, exist_ok=True)
                else:
                    # Close the member handle deterministically instead of leaking it
                    with zip_ref.open(member, "r") as source_file, open(target_file, "wb") as file:
                        shutil.copyfileobj(source_file, file)
        console.print("✅ [bold green]Extracted zip file successfully.[/bold green]")
        anonymous_telemetry.capture(event_name="ec_create_app", properties={"success": True})
    except zipfile.BadZipFile:
        console.print("❌ [bold red]Error in extracting zip file. The file might be corrupted.[/bold red]")
        anonymous_telemetry.capture(event_name="ec_create_app", properties={"success": False})
        return
    finally:
        # The temp file was created with delete=False; remove it ourselves.
        os.remove(zip_file_path)

    if docker:
        subprocess.run(["docker-compose", "build"], check=True)
    else:
        ctx.invoke(install_reqs)
@cli.command()
def install_reqs():
    """Install API (pip) and UI (yarn) dependencies for the scaffolded app."""
    root_dir = os.getcwd()

    try:
        console.print("Installing python requirements...\n")
        time.sleep(2)
        os.chdir("api")
        subprocess.run(["pip", "install", "-r", "requirements.txt"], check=True)
        console.print("\n ✅ [bold green]Installed API requirements successfully.[/bold green]\n")
    except Exception as e:
        console.print(f"❌ [bold red]Failed to install API requirements: {e}[/bold red]")
        anonymous_telemetry.capture(event_name="ec_install_reqs", properties={"success": False})
        return
    finally:
        # Bug fix: always return to the app root, even when pip fails;
        # previously a failure left the process stranded inside api/.
        os.chdir(root_dir)

    try:
        os.chdir("ui")
        subprocess.run(["yarn"], check=True)
        console.print("\n✅ [bold green]Successfully installed frontend requirements.[/bold green]")
        anonymous_telemetry.capture(event_name="ec_install_reqs", properties={"success": True})
    except Exception as e:
        console.print(f"❌ [bold red]Failed to install frontend requirements. Error: {e}[/bold red]")
        anonymous_telemetry.capture(event_name="ec_install_reqs", properties={"success": False})
    finally:
        os.chdir(root_dir)
@cli.command()
@click.option("--docker", is_flag=True, help="Run inside docker.")
def start(docker):
    """Start the API and UI dev servers (or `docker-compose up` with --docker)."""
    # Bug fix: bind the module-level handles so signal_handler can terminate them.
    # Without `global`, these assignments created locals and the handler always saw None.
    global api_process, ui_process

    if docker:
        subprocess.run(["docker-compose", "up"], check=True)
        return

    # Set up signal handling
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Step 1: Start the API server
    try:
        os.chdir("api")
        api_process = subprocess.Popen(["python", "-m", "main"], stdout=None, stderr=None)
        os.chdir("..")
        console.print("✅ [bold green]API server started successfully.[/bold green]")
    except Exception as e:
        console.print(f"❌ [bold red]Failed to start the API server: {e}[/bold red]")
        anonymous_telemetry.capture(event_name="ec_start", properties={"success": False})
        return

    # Sleep for 2 seconds to give the user time to read the message
    time.sleep(2)

    # Step 2: Install UI requirements and start the UI server
    try:
        os.chdir("ui")
        subprocess.run(["yarn"], check=True)
        ui_process = subprocess.Popen(["yarn", "dev"])
        console.print("✅ [bold green]UI server started successfully.[/bold green]")
        anonymous_telemetry.capture(event_name="ec_start", properties={"success": True})
    except Exception as e:
        console.print(f"❌ [bold red]Failed to start the UI server: {e}[/bold red]")
        anonymous_telemetry.capture(event_name="ec_start", properties={"success": False})

    # Keep the script running until it receives a kill signal
    try:
        api_process.wait()
        # Robustness: the UI server may have failed to start.
        if ui_process is not None:
            ui_process.wait()
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]Stopping server...[/bold yellow]")
@cli.command()
@click.option("--template", default="fly.io", help="The template to use.")
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
def create(template, extra_args):
    """Copy a deployment template into the cwd and run its platform-specific setup."""
    anonymous_telemetry.capture(event_name="ec_create", properties={"template_used": template})

    # For "hf/<name>"-style templates, the package directory is the part after the slash.
    template_dir = template.split("/")[1] if "/" in template else template
    src_path = get_pkg_path_from_name(template_dir)
    shutil.copytree(src_path, os.getcwd(), dirs_exist_ok=True)
    console.print(f"✅ [bold green]Successfully created app from template '{template}'.[/bold green]")

    # Dispatch to the per-platform setup routine.
    setup_handlers = {
        "fly.io": lambda: setup_fly_io_app(extra_args),
        "modal.com": lambda: setup_modal_com_app(extra_args),
        "render.com": setup_render_com_app,
        "streamlit.io": setup_streamlit_io_app,
        "gradio.app": setup_gradio_app,
        "hf/gradio.app": setup_hf_app,
        "hf/streamlit.io": setup_hf_app,
    }
    handler = setup_handlers.get(template)
    if handler is None:
        raise ValueError(f"Unknown template '{template}'.")
    handler()

    with open("embedchain.json", "w") as file:
        json.dump({"provider": template}, file, indent=4)
    console.print(
        f"🎉 [green]All done! Successfully created `embedchain.json` with '{template}' as provider.[/green]"
    )
def run_dev_fly_io(debug, host, port):
    """Run the fly.io template's FastAPI app locally via uvicorn."""
    command = ["uvicorn", "app:app"]
    if debug:
        command.append("--reload")
    command += ["--host", host, "--port", str(port)]

    try:
        console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(command)}[/bold cyan]")
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_modal_com():
    """Serve the modal.com template's app locally through the modal CLI."""
    command = ["modal", "serve", "app"]
    try:
        console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(command)}[/bold cyan]")
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_streamlit_io():
    """Run the streamlit.io template's app locally through the streamlit CLI."""
    command = ["streamlit", "run", "app.py"]
    try:
        console.print(f"🚀 [bold cyan]Running Streamlit app with command: {' '.join(command)}[/bold cyan]")
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]Streamlit server stopped[/bold yellow]")
def run_dev_render_com(debug, host, port):
    """Run the render.com template's FastAPI app locally via uvicorn."""
    command = ["uvicorn", "app:app"]
    if debug:
        command.append("--reload")
    command += ["--host", host, "--port", str(port)]

    try:
        console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(command)}[/bold cyan]")
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_gradio():
    """Run the gradio.app template's app locally through the gradio CLI."""
    command = ["gradio", "app.py"]
    try:
        console.print(f"🚀 [bold cyan]Running Gradio app with command: {' '.join(command)}[/bold cyan]")
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
    except KeyboardInterrupt:
        console.print("\n🛑 [bold yellow]Gradio server stopped[/bold yellow]")
@cli.command()
@click.option("--debug", is_flag=True, help="Enable or disable debug mode.")
@click.option("--host", default="127.0.0.1", help="The host address to run the FastAPI app on.")
@click.option("--port", default=8000, help="The port to run the FastAPI app on.")
def dev(debug, host, port):
    """Run the current app's dev server based on the provider in embedchain.json."""
    with open("embedchain.json", "r") as file:
        template = json.load(file)["provider"]

    anonymous_telemetry.capture(event_name="ec_dev", properties={"template_used": template})

    if template == "fly.io":
        run_dev_fly_io(debug, host, port)
    elif template == "modal.com":
        run_dev_modal_com()
    elif template == "render.com":
        run_dev_render_com(debug, host, port)
    elif template in ("streamlit.io", "hf/streamlit.io"):
        run_dev_streamlit_io()
    elif template in ("gradio.app", "hf/gradio.app"):
        run_dev_gradio()
    else:
        raise ValueError(f"Unknown template '{template}'.")
@cli.command()
def deploy():
    """Deploy the current app using the provider recorded in embedchain.json."""
    # Check for platform-specific files
    with open("embedchain.json", "r") as file:
        embedchain_config = json.load(file)
    # Idiom: dict.get replaces the `x if k in d else None` dance; "name" is optional.
    ec_app_name = embedchain_config.get("name")
    template = embedchain_config["provider"]

    anonymous_telemetry.capture(event_name="ec_deploy", properties={"template_used": template})
    if template == "fly.io":
        deploy_fly()
    elif template == "modal.com":
        deploy_modal()
    elif template == "render.com":
        deploy_render()
    elif template == "streamlit.io":
        deploy_streamlit()
    elif template == "gradio.app":
        deploy_gradio_app()
    elif template.startswith("hf/"):
        # Hugging Face Spaces deployments need the app name for the space.
        deploy_hf_spaces(ec_app_name)
    else:
        console.print("❌ [bold red]No recognized deployment platform found.[/bold red]")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/constants.py | embedchain/embedchain/constants.py | import os
from pathlib import Path
# Current working directory captured at import time.
ABS_PATH = os.getcwd()
# Config root; can be overridden via EMBEDCHAIN_CONFIG_DIR, defaults to the user's home.
HOME_DIR = os.environ.get("EMBEDCHAIN_CONFIG_DIR", str(Path.home()))
CONFIG_DIR = os.path.join(HOME_DIR, ".embedchain")  # e.g. ~/.embedchain
CONFIG_FILE = os.path.join(CONFIG_DIR, "config.json")  # user id / API key store
SQLITE_PATH = os.path.join(CONFIG_DIR, "embedchain.db")  # local SQLite database file
# Set the environment variable for the database URI (setdefault: a user-provided value wins)
os.environ.setdefault("EMBEDCHAIN_DB_URI", f"sqlite:///{SQLITE_PATH}")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/factory.py | embedchain/embedchain/factory.py | import importlib
def load_class(class_type):
    """Resolve a dotted ``module.ClassName`` string to the class object it names."""
    module_path, class_name = class_type.rsplit(".", 1)
    return getattr(importlib.import_module(module_path), class_name)
class LlmFactory:
    """Builds LLM instances from a provider name plus raw config data."""

    # provider name -> dotted path of the LLM implementation class
    provider_to_class = {
        "anthropic": "embedchain.llm.anthropic.AnthropicLlm",
        "azure_openai": "embedchain.llm.azure_openai.AzureOpenAILlm",
        "cohere": "embedchain.llm.cohere.CohereLlm",
        "together": "embedchain.llm.together.TogetherLlm",
        "gpt4all": "embedchain.llm.gpt4all.GPT4ALLLlm",
        "ollama": "embedchain.llm.ollama.OllamaLlm",
        "huggingface": "embedchain.llm.huggingface.HuggingFaceLlm",
        "jina": "embedchain.llm.jina.JinaLlm",
        "llama2": "embedchain.llm.llama2.Llama2Llm",
        "openai": "embedchain.llm.openai.OpenAILlm",
        "vertexai": "embedchain.llm.vertex_ai.VertexAILlm",
        "google": "embedchain.llm.google.GoogleLlm",
        "aws_bedrock": "embedchain.llm.aws_bedrock.AWSBedrockLlm",
        "mistralai": "embedchain.llm.mistralai.MistralAILlm",
        "clarifai": "embedchain.llm.clarifai.ClarifaiLlm",
        "groq": "embedchain.llm.groq.GroqLlm",
        "nvidia": "embedchain.llm.nvidia.NvidiaLlm",
        "vllm": "embedchain.llm.vllm.VLLM",
    }
    # provider name -> dotted path of its config class
    provider_to_config_class = {
        "embedchain": "embedchain.config.llm.base.BaseLlmConfig",
        "openai": "embedchain.config.llm.base.BaseLlmConfig",
        "anthropic": "embedchain.config.llm.base.BaseLlmConfig",
    }

    @classmethod
    def create(cls, provider_name, config_data):
        """Instantiate the LLM registered for ``provider_name``.

        :param provider_name: Key into ``provider_to_class``.
        :param config_data: Keyword arguments forwarded to the config class.
        :raises ValueError: If the provider has no registered implementation.
        """
        class_path = cls.provider_to_class.get(provider_name)
        if not class_path:
            raise ValueError(f"Unsupported Llm provider: {provider_name}")
        # Providers without a dedicated config class fall back to the embedchain base config.
        config_key = provider_name if provider_name in cls.provider_to_config_class else "embedchain"
        config_cls = load_class(cls.provider_to_config_class[config_key])
        llm_cls = load_class(class_path)
        return llm_cls(config=config_cls(**config_data))
class EmbedderFactory:
    """Builds embedder instances from a provider name plus raw config data."""

    # provider name -> dotted path of the embedder implementation class
    provider_to_class = {
        "azure_openai": "embedchain.embedder.azure_openai.AzureOpenAIEmbedder",
        "gpt4all": "embedchain.embedder.gpt4all.GPT4AllEmbedder",
        "huggingface": "embedchain.embedder.huggingface.HuggingFaceEmbedder",
        "openai": "embedchain.embedder.openai.OpenAIEmbedder",
        "vertexai": "embedchain.embedder.vertexai.VertexAIEmbedder",
        "google": "embedchain.embedder.google.GoogleAIEmbedder",
        "mistralai": "embedchain.embedder.mistralai.MistralAIEmbedder",
        "clarifai": "embedchain.embedder.clarifai.ClarifaiEmbedder",
        "nvidia": "embedchain.embedder.nvidia.NvidiaEmbedder",
        "cohere": "embedchain.embedder.cohere.CohereEmbedder",
        "ollama": "embedchain.embedder.ollama.OllamaEmbedder",
        "aws_bedrock": "embedchain.embedder.aws_bedrock.AWSBedrockEmbedder",
    }
    # provider name -> dotted path of its config class
    provider_to_config_class = {
        "azure_openai": "embedchain.config.embedder.base.BaseEmbedderConfig",
        "google": "embedchain.config.embedder.google.GoogleAIEmbedderConfig",
        "gpt4all": "embedchain.config.embedder.base.BaseEmbedderConfig",
        "huggingface": "embedchain.config.embedder.base.BaseEmbedderConfig",
        "clarifai": "embedchain.config.embedder.base.BaseEmbedderConfig",
        "openai": "embedchain.config.embedder.base.BaseEmbedderConfig",
        "ollama": "embedchain.config.embedder.ollama.OllamaEmbedderConfig",
        "aws_bedrock": "embedchain.config.embedder.aws_bedrock.AWSBedrockEmbedderConfig",
    }

    @classmethod
    def create(cls, provider_name, config_data):
        """Instantiate the embedder registered for ``provider_name``.

        :param provider_name: Key into ``provider_to_class``.
        :param config_data: Keyword arguments forwarded to the config class.
        :raises ValueError: If the provider has no registered implementation.
        """
        class_path = cls.provider_to_class.get(provider_name)
        if not class_path:
            raise ValueError(f"Unsupported Embedder provider: {provider_name}")
        # Providers without a dedicated config class fall back to the openai base config.
        config_key = provider_name if provider_name in cls.provider_to_config_class else "openai"
        config_cls = load_class(cls.provider_to_config_class[config_key])
        embedder_cls = load_class(class_path)
        return embedder_cls(config=config_cls(**config_data))
class VectorDBFactory:
    """Builds vector-database instances from a provider name plus raw config data."""

    # provider name -> dotted path of the vector DB implementation class
    provider_to_class = {
        "chroma": "embedchain.vectordb.chroma.ChromaDB",
        "elasticsearch": "embedchain.vectordb.elasticsearch.ElasticsearchDB",
        "opensearch": "embedchain.vectordb.opensearch.OpenSearchDB",
        "lancedb": "embedchain.vectordb.lancedb.LanceDB",
        "pinecone": "embedchain.vectordb.pinecone.PineconeDB",
        "qdrant": "embedchain.vectordb.qdrant.QdrantDB",
        "weaviate": "embedchain.vectordb.weaviate.WeaviateDB",
        "zilliz": "embedchain.vectordb.zilliz.ZillizVectorDB",
    }
    # provider name -> dotted path of its config class (keys mirror provider_to_class)
    provider_to_config_class = {
        "chroma": "embedchain.config.vector_db.chroma.ChromaDbConfig",
        "elasticsearch": "embedchain.config.vector_db.elasticsearch.ElasticsearchDBConfig",
        "opensearch": "embedchain.config.vector_db.opensearch.OpenSearchDBConfig",
        "lancedb": "embedchain.config.vector_db.lancedb.LanceDBConfig",
        "pinecone": "embedchain.config.vector_db.pinecone.PineconeDBConfig",
        "qdrant": "embedchain.config.vector_db.qdrant.QdrantDBConfig",
        "weaviate": "embedchain.config.vector_db.weaviate.WeaviateDBConfig",
        "zilliz": "embedchain.config.vector_db.zilliz.ZillizDBConfig",
    }

    @classmethod
    def create(cls, provider_name, config_data):
        """Instantiate the vector DB registered for ``provider_name``.

        :param provider_name: Key into ``provider_to_class``.
        :param config_data: Keyword arguments forwarded to the config class.
        :raises ValueError: If the provider has no registered implementation.
        """
        class_type = cls.provider_to_class.get(provider_name)
        config_class_type = cls.provider_to_config_class.get(provider_name)
        if class_type:
            # Renamed from embedder_* — these are vector DB classes, not embedders.
            vector_db_class = load_class(class_type)
            vector_db_config_class = load_class(config_class_type)
            return vector_db_class(config=vector_db_config_class(**config_data))
        else:
            # Bug fix: the message previously said "Embedder", copied from EmbedderFactory.
            raise ValueError(f"Unsupported VectorDB provider: {provider_name}")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/pipeline.py | embedchain/embedchain/pipeline.py | from embedchain.app import App
class Pipeline(App):
    """
    Deprecated alias of :class:`App`. Use ``App`` directly.
    """

    def __init__(self, *args, **kwargs):
        """Forward all arguments to :class:`App` after emitting a deprecation warning."""
        # Surface the deprecation at runtime instead of only in the docstring.
        import warnings

        warnings.warn(
            "The 'Pipeline' class is deprecated. Use 'App' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/__init__.py | embedchain/embedchain/__init__.py | import importlib.metadata
# Package version as recorded in the installed distribution metadata.
__version__ = importlib.metadata.version(__package__ or __name__)

# Re-export the package's public entry points.
from embedchain.app import App  # noqa: F401
from embedchain.client import Client  # noqa: F401
from embedchain.pipeline import Pipeline  # noqa: F401

# Setup the user directory if doesn't exist already
Client.setup()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedchain.py | embedchain/embedchain/embedchain.py | import hashlib
import json
import logging
from typing import Any, Optional, Union
from dotenv import load_dotenv
from langchain.docstore.document import Document
from embedchain.cache import (
adapt,
get_gptcache_session,
gptcache_data_convert,
gptcache_update_cache_callback,
)
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config import AddConfig, BaseLlmConfig, ChunkerConfig
from embedchain.config.base_app_config import BaseAppConfig
from embedchain.core.db.models import ChatHistory, DataSource
from embedchain.data_formatter import DataFormatter
from embedchain.embedder.base import BaseEmbedder
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.llm.base import BaseLlm
from embedchain.loaders.base_loader import BaseLoader
from embedchain.models.data_type import (
DataType,
DirectDataType,
IndirectDataType,
SpecialDataType,
)
from embedchain.utils.misc import detect_datatype, is_valid_json_string
from embedchain.vectordb.base import BaseVectorDB
load_dotenv()
logger = logging.getLogger(__name__)
class EmbedChain(JSONSerializable):
    def __init__(
        self,
        config: BaseAppConfig,
        llm: BaseLlm,
        db: BaseVectorDB = None,
        embedder: BaseEmbedder = None,
        system_prompt: Optional[str] = None,
    ):
        """
        Initializes the EmbedChain instance, sets up a vector DB client and
        creates a collection.

        :param config: Configuration just for the app, not the db or llm or embedder.
        :type config: BaseAppConfig
        :param llm: Instance of the LLM you want to use.
        :type llm: BaseLlm
        :param db: Instance of the Database to use, defaults to None
        :type db: BaseVectorDB, optional
        :param embedder: instance of the embedder to use, defaults to None
        :type embedder: BaseEmbedder, optional
        :param system_prompt: System prompt to use in the llm query, defaults to None
        :type system_prompt: Optional[str], optional
        :raises ValueError: No database or embedder provided.
        """
        self.config = config
        # Optional cache / memory integrations; left unset here (presumably
        # configured elsewhere — not visible in this file section).
        self.cache_config = None
        self.memory_config = None
        self.mem0_memory = None
        # Llm
        self.llm = llm
        # Database has support for config assignment for backwards compatibility
        if db is None and (not hasattr(self.config, "db") or self.config.db is None):
            raise ValueError("App requires Database.")
        self.db = db or self.config.db
        # Embedder
        if embedder is None:
            raise ValueError("App requires Embedder.")
        self.embedder = embedder
        # Initialize database
        self.db._set_embedder(self.embedder)
        self.db._initialize()
        # Set collection name from app config for backwards compatibility.
        if config.collection_name:
            self.db.set_collection_name(config.collection_name)
        # Add variables that are "shortcuts"
        if system_prompt:
            self.llm.config.system_prompt = system_prompt
        # Fetch the history from the database if exists
        self.llm.update_history(app_id=self.config.id)
        # Attributes that aren't subclass related.
        # user_asks is appended by add() with [source, data_type, metadata] entries.
        self.user_asks = []
        # App-level chunker config; add() uses it when no explicit AddConfig is passed.
        self.chunker: Optional[ChunkerConfig] = None
    @property
    def collect_metrics(self):
        # Whether anonymous telemetry is enabled; proxied to the app config.
        return self.config.collect_metrics

    @collect_metrics.setter
    def collect_metrics(self, value):
        # Reject non-boolean values outright rather than coercing truthy inputs.
        if not isinstance(value, bool):
            raise ValueError(f"Boolean value expected but got {type(value)}.")
        self.config.collect_metrics = value
    @property
    def online(self):
        # Online mode flag; proxied to the LLM config.
        return self.llm.config.online

    @online.setter
    def online(self, value):
        # Reject non-boolean values outright rather than coercing truthy inputs.
        if not isinstance(value, bool):
            raise ValueError(f"Boolean value expected but got {type(value)}.")
        self.llm.config.online = value
    def add(
        self,
        source: Any,
        data_type: Optional[DataType] = None,
        metadata: Optional[dict[str, Any]] = None,
        config: Optional[AddConfig] = None,
        dry_run=False,
        loader: Optional[BaseLoader] = None,
        chunker: Optional[BaseChunker] = None,
        **kwargs: Optional[dict[str, Any]],
    ):
        """
        Adds the data from the given URL to the vector db.
        Loads the data, chunks it, create embedding for each chunk
        and then stores the embedding to vector database.

        :param source: The data to embed, can be a URL, local file or raw content, depending on the data type.
        :type source: Any
        :param data_type: Automatically detected, but can be forced with this argument. The type of the data to add,
        defaults to None
        :type data_type: Optional[DataType], optional
        :param metadata: Metadata associated with the data source., defaults to None
        :type metadata: Optional[dict[str, Any]], optional
        :param config: The `AddConfig` instance to use as configuration options., defaults to None
        :type config: Optional[AddConfig], optional
        :raises ValueError: Invalid data type
        :param dry_run: Optional. A dry run displays the chunks to ensure that the loader and chunker work as intended.
        defaults to False
        :type dry_run: bool
        :param loader: The loader to use to load the data, defaults to None
        :type loader: BaseLoader, optional
        :param chunker: The chunker to use to chunk the data, defaults to None
        :type chunker: BaseChunker, optional
        :param kwargs: To read more params for the query function
        :type kwargs: dict[str, Any]
        :return: source_hash, a md5-hash of the source, in hexadecimal representation.
        :rtype: str
        """
        # Config precedence: explicit argument > app-level self.chunker > defaults.
        if config is not None:
            pass
        elif self.chunker is not None:
            config = AddConfig(chunker=self.chunker)
        else:
            config = AddConfig()
        # Legacy argument order: if `source` parses as a DataType, the caller used the
        # old (data_type, source) ordering — warn and swap the two arguments.
        try:
            DataType(source)
            logger.warning(
                f"""Starting from version v0.0.40, Embedchain can automatically detect the data type. So, in the `add` method, the argument order has changed. You no longer need to specify '{source}' for the `source` argument. So the code snippet will be `.add("{data_type}", "{source}")`"""  # noqa #E501
            )
            logger.warning(
                "Embedchain is swapping the arguments for you. This functionality might be deprecated in the future, so please adjust your code."  # noqa #E501
            )
            source, data_type = data_type, source
        except ValueError:
            pass
        # Normalize data_type to the DataType enum; unknown strings fall back to CUSTOM.
        if data_type:
            try:
                data_type = DataType(data_type)
            except ValueError:
                logger.info(
                    f"Invalid data_type: '{data_type}', using `custom` instead.\n Check docs to pass the valid data type: `https://docs.embedchain.ai/data-sources/overview`"  # noqa: E501
                )
                data_type = DataType.CUSTOM
        # No type given or detected above: infer it from the source itself.
        if not data_type:
            data_type = detect_datatype(source)
        # `source_hash` is the md5 hash of the source argument
        source_hash = hashlib.md5(str(source).encode("utf-8")).hexdigest()
        self.user_asks.append([source, data_type.value, metadata])
        data_formatter = DataFormatter(data_type, config, loader, chunker)
        # Load, chunk and embed; returns empty lists and 0 new chunks if unchanged.
        documents, metadatas, _ids, new_chunks = self._load_and_embed(
            data_formatter.loader, data_formatter.chunker, source, metadata, source_hash, config, dry_run, **kwargs
        )
        if data_type in {DataType.DOCS_SITE}:
            self.is_docs_site_instance = True
        # Convert the source to a string if it is not already
        if not isinstance(source, str):
            source = str(source)
        # Insert the data into the 'ec_data_sources' table
        # NOTE(review): self.db_session and self.telemetry are not assigned in the
        # visible __init__; presumably provided by a subclass — confirm before relying on them.
        self.db_session.add(
            DataSource(
                hash=source_hash,
                app_id=self.config.id,
                type=data_type.value,
                value=source,
                metadata=json.dumps(metadata),
            )
        )
        try:
            self.db_session.commit()
        except Exception as e:
            # Best-effort bookkeeping: log and roll back, but don't fail the add.
            logger.error(f"Error adding data source: {e}")
            self.db_session.rollback()
        if dry_run:
            data_chunks_info = {"chunks": documents, "metadata": metadatas, "count": len(documents), "type": data_type}
            logger.debug(f"Dry run info : {data_chunks_info}")
            return data_chunks_info
        # Send anonymous telemetry
        if self.config.collect_metrics:
            # it's quicker to check the variable twice than to count words when they won't be submitted.
            word_count = data_formatter.chunker.get_word_count(documents)
            # Send anonymous telemetry
            event_properties = {
                **self._telemetry_props,
                "data_type": data_type.value,
                "word_count": word_count,
                "chunks_count": new_chunks,
            }
            self.telemetry.capture(event_name="add", properties=event_properties)
        return source_hash
    def _get_existing_doc_id(self, chunker: BaseChunker, src: Any):
        """
        Get id of existing document for a given source, based on the data type

        Returns the stored ``doc_id`` when a matching document exists, else None.
        Raises NotImplementedError for SpecialDataTypes without custom lookup logic,
        and TypeError when the chunker's data type is in none of the three categories.
        """
        # Find existing embeddings for the source
        # Depending on the data type, existing embeddings are checked for.
        if chunker.data_type.value in [item.value for item in DirectDataType]:
            # DirectDataTypes can't be updated.
            # Think of a text:
            # Either it's the same, then it won't change, so it's not an update.
            # Or it's different, then it will be added as a new text.
            return None
        elif chunker.data_type.value in [item.value for item in IndirectDataType]:
            # These types have an indirect source reference
            # As long as the reference is the same, they can be updated.
            where = {"url": src}
            # Raw JSON strings have no URL; key them by a sha256 of their content instead.
            if chunker.data_type == DataType.JSON and is_valid_json_string(src):
                url = hashlib.sha256((src).encode("utf-8")).hexdigest()
                where = {"url": url}
            # Scope the lookup to this app when an app id is configured.
            if self.config.id is not None:
                where.update({"app_id": self.config.id})
            existing_embeddings = self.db.get(
                where=where,
                limit=1,
            )
            if len(existing_embeddings.get("metadatas", [])) > 0:
                return existing_embeddings["metadatas"][0]["doc_id"]
            else:
                return None
        elif chunker.data_type.value in [item.value for item in SpecialDataType]:
            # These types don't contain indirect references.
            # Through custom logic, they can be attributed to a source and be updated.
            if chunker.data_type == DataType.QNA_PAIR:
                # QNA_PAIRs update the answer if the question already exists.
                # src is a (question, answer) pair; the question is the lookup key.
                where = {"question": src[0]}
                if self.config.id is not None:
                    where.update({"app_id": self.config.id})
                existing_embeddings = self.db.get(
                    where=where,
                    limit=1,
                )
                if len(existing_embeddings.get("metadatas", [])) > 0:
                    return existing_embeddings["metadatas"][0]["doc_id"]
                else:
                    return None
            else:
                raise NotImplementedError(
                    f"SpecialDataType {chunker.data_type} must have a custom logic to check for existing data"
                )
        else:
            raise TypeError(
                f"{chunker.data_type} is type {type(chunker.data_type)}. "
                "When it should be DirectDataType, IndirectDataType or SpecialDataType."
            )
def _load_and_embed(
self,
loader: BaseLoader,
chunker: BaseChunker,
src: Any,
metadata: Optional[dict[str, Any]] = None,
source_hash: Optional[str] = None,
add_config: Optional[AddConfig] = None,
dry_run=False,
**kwargs: Optional[dict[str, Any]],
):
"""
Loads the data from the given URL, chunks it, and adds it to database.
:param loader: The loader to use to load the data.
:type loader: BaseLoader
:param chunker: The chunker to use to chunk the data.
:type chunker: BaseChunker
:param src: The data to be handled by the loader. Can be a URL for
remote sources or local content for local loaders.
:type src: Any
:param metadata: Metadata associated with the data source.
:type metadata: dict[str, Any], optional
:param source_hash: Hexadecimal hash of the source.
:type source_hash: str, optional
:param add_config: The `AddConfig` instance to use as configuration options.
:type add_config: AddConfig, optional
:param dry_run: A dry run returns chunks and doesn't update DB.
:type dry_run: bool, defaults to False
:return: (list) documents (embedded text), (list) metadata, (list) ids, (int) number of chunks
"""
existing_doc_id = self._get_existing_doc_id(chunker=chunker, src=src)
app_id = self.config.id if self.config is not None else None
# Create chunks
embeddings_data = chunker.create_chunks(loader, src, app_id=app_id, config=add_config.chunker, **kwargs)
# spread chunking results
documents = embeddings_data["documents"]
metadatas = embeddings_data["metadatas"]
ids = embeddings_data["ids"]
new_doc_id = embeddings_data["doc_id"]
if existing_doc_id and existing_doc_id == new_doc_id:
logger.info("Doc content has not changed. Skipping creating chunks and embeddings")
return [], [], [], 0
# this means that doc content has changed.
if existing_doc_id and existing_doc_id != new_doc_id:
logger.info("Doc content has changed. Recomputing chunks and embeddings intelligently.")
self.db.delete({"doc_id": existing_doc_id})
# get existing ids, and discard doc if any common id exist.
where = {"url": src}
if chunker.data_type == DataType.JSON and is_valid_json_string(src):
url = hashlib.sha256((src).encode("utf-8")).hexdigest()
where = {"url": url}
# if data type is qna_pair, we check for question
if chunker.data_type == DataType.QNA_PAIR:
where = {"question": src[0]}
if self.config.id is not None:
where["app_id"] = self.config.id
db_result = self.db.get(ids=ids, where=where) # optional filter
existing_ids = set(db_result["ids"])
if len(existing_ids):
data_dict = {id: (doc, meta) for id, doc, meta in zip(ids, documents, metadatas)}
data_dict = {id: value for id, value in data_dict.items() if id not in existing_ids}
if not data_dict:
src_copy = src
if len(src_copy) > 50:
src_copy = src[:50] + "..."
logger.info(f"All data from {src_copy} already exists in the database.")
# Make sure to return a matching return type
return [], [], [], 0
ids = list(data_dict.keys())
documents, metadatas = zip(*data_dict.values())
# Loop though all metadatas and add extras.
new_metadatas = []
for m in metadatas:
# Add app id in metadatas so that they can be queried on later
if self.config.id:
m["app_id"] = self.config.id
# Add hashed source
m["hash"] = source_hash
# Note: Metadata is the function argument
if metadata:
# Spread whatever is in metadata into the new object.
m.update(metadata)
new_metadatas.append(m)
metadatas = new_metadatas
if dry_run:
return list(documents), metadatas, ids, 0
# Count before, to calculate a delta in the end.
chunks_before_addition = self.db.count()
# Filter out empty documents and ensure they meet the API requirements
valid_documents = [doc for doc in documents if doc and isinstance(doc, str)]
documents = valid_documents
# Chunk documents into batches of 2048 and handle each batch
# helps wigth large loads of embeddings that hit OpenAI limits
document_batches = [documents[i : i + 2048] for i in range(0, len(documents), 2048)]
metadata_batches = [metadatas[i : i + 2048] for i in range(0, len(metadatas), 2048)]
id_batches = [ids[i : i + 2048] for i in range(0, len(ids), 2048)]
for batch_docs, batch_meta, batch_ids in zip(document_batches, metadata_batches, id_batches):
try:
# Add only valid batches
if batch_docs:
self.db.add(documents=batch_docs, metadatas=batch_meta, ids=batch_ids, **kwargs)
except Exception as e:
logger.info(f"Failed to add batch due to a bad request: {e}")
# Handle the error, e.g., by logging, retrying, or skipping
pass
count_new_chunks = self.db.count() - chunks_before_addition
logger.info(f"Successfully saved {str(src)[:100]} ({chunker.data_type}). New chunks count: {count_new_chunks}")
return list(documents), metadatas, ids, count_new_chunks
@staticmethod
def _format_result(results):
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def _retrieve_from_database(
self,
input_query: str,
config: Optional[BaseLlmConfig] = None,
where=None,
citations: bool = False,
**kwargs: Optional[dict[str, Any]],
) -> Union[list[tuple[str, str, str]], list[str]]:
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query
:param input_query: The query to use.
:type input_query: str
:param config: The query configuration, defaults to None
:type config: Optional[BaseLlmConfig], optional
:param where: A dictionary of key-value pairs to filter the database results, defaults to None
:type where: _type_, optional
:param citations: A boolean to indicate if db should fetch citation source
:type citations: bool
:return: List of contents of the document that matched your query
:rtype: list[str]
"""
query_config = config or self.llm.config
if where is not None:
where = where
else:
where = {}
if query_config is not None and query_config.where is not None:
where = query_config.where
if self.config.id is not None:
where.update({"app_id": self.config.id})
contexts = self.db.query(
input_query=input_query,
n_results=query_config.number_documents,
where=where,
citations=citations,
**kwargs,
)
return contexts
def query(
self,
input_query: str,
config: BaseLlmConfig = None,
dry_run=False,
where: Optional[dict] = None,
citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
:param input_query: The query to use.
:type input_query: str
:param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
To persistently use a config, declare it during app init., defaults to None
:type config: BaseLlmConfig, optional
:param dry_run: A dry run does everything except send the resulting prompt to
the LLM. The purpose is to test the prompt, not the response., defaults to False
:type dry_run: bool, optional
:param where: A dictionary of key-value pairs to filter the database results., defaults to None
:type where: dict[str, str], optional
:param citations: A boolean to indicate if db should fetch citation source
:type citations: bool
:param kwargs: To read more params for the query function. Ex. we use citations boolean
param to return context along with the answer
:type kwargs: dict[str, Any]
:return: The answer to the query, with citations if the citation flag is True
or the dry run result
:rtype: str, if citations is False and token_usage is False, otherwise if citations is true then
tuple[str, list[tuple[str,str,str]]] and if token_usage is true then
tuple[str, list[tuple[str,str,str]], dict[str, Any]]
"""
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
if citations and len(contexts) > 0 and isinstance(contexts[0], tuple):
contexts_data_for_llm_query = list(map(lambda x: x[0], contexts))
else:
contexts_data_for_llm_query = contexts
if self.cache_config is not None:
logger.info("Cache enabled. Checking cache...")
answer = adapt(
llm_handler=self.llm.query,
cache_data_convert=gptcache_data_convert,
update_cache_callback=gptcache_update_cache_callback,
session=get_gptcache_session(session_id=self.config.id),
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
)
else:
if self.llm.config.token_usage:
answer, token_info = self.llm.query(
input_query=input_query, contexts=contexts_data_for_llm_query, config=config, dry_run=dry_run
)
else:
answer = self.llm.query(
input_query=input_query, contexts=contexts_data_for_llm_query, config=config, dry_run=dry_run
)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="query", properties=self._telemetry_props)
if citations:
if self.llm.config.token_usage:
return {"answer": answer, "contexts": contexts, "usage": token_info}
return answer, contexts
if self.llm.config.token_usage:
return {"answer": answer, "usage": token_info}
logger.warning(
"Starting from v0.1.125 the return type of query method will be changed to tuple containing `answer`."
)
return answer
def chat(
self,
input_query: str,
config: Optional[BaseLlmConfig] = None,
dry_run=False,
session_id: str = "default",
where: Optional[dict[str, str]] = None,
citations: bool = False,
**kwargs: dict[str, Any],
) -> Union[tuple[str, list[tuple[str, dict]]], str, dict[str, Any]]:
"""
Queries the vector database on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
Maintains the whole conversation in memory.
:param input_query: The query to use.
:type input_query: str
:param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
To persistently use a config, declare it during app init., defaults to None
:type config: BaseLlmConfig, optional
:param dry_run: A dry run does everything except send the resulting prompt to
the LLM. The purpose is to test the prompt, not the response., defaults to False
:type dry_run: bool, optional
:param session_id: The session id to use for chat history, defaults to 'default'.
:type session_id: str, optional
:param where: A dictionary of key-value pairs to filter the database results., defaults to None
:type where: dict[str, str], optional
:param citations: A boolean to indicate if db should fetch citation source
:type citations: bool
:param kwargs: To read more params for the query function. Ex. we use citations boolean
param to return context along with the answer
:type kwargs: dict[str, Any]
:return: The answer to the query, with citations if the citation flag is True
or the dry run result
:rtype: str, if citations is False and token_usage is False, otherwise if citations is true then
tuple[str, list[tuple[str,str,str]]] and if token_usage is true then
tuple[str, list[tuple[str,str,str]], dict[str, Any]]
"""
contexts = self._retrieve_from_database(
input_query=input_query, config=config, where=where, citations=citations, **kwargs
)
if citations and len(contexts) > 0 and isinstance(contexts[0], tuple):
contexts_data_for_llm_query = list(map(lambda x: x[0], contexts))
else:
contexts_data_for_llm_query = contexts
memories = None
if self.mem0_memory:
memories = self.mem0_memory.search(
query=input_query, agent_id=self.config.id, user_id=session_id, limit=self.memory_config.top_k
)
# Update the history beforehand so that we can handle multiple chat sessions in the same python session
self.llm.update_history(app_id=self.config.id, session_id=session_id)
if self.cache_config is not None:
logger.debug("Cache enabled. Checking cache...")
cache_id = f"{session_id}--{self.config.id}"
answer = adapt(
llm_handler=self.llm.chat,
cache_data_convert=gptcache_data_convert,
update_cache_callback=gptcache_update_cache_callback,
session=get_gptcache_session(session_id=cache_id),
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
)
else:
logger.debug("Cache disabled. Running chat without cache.")
if self.llm.config.token_usage:
answer, token_info = self.llm.query(
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
memories=memories,
)
else:
answer = self.llm.query(
input_query=input_query,
contexts=contexts_data_for_llm_query,
config=config,
dry_run=dry_run,
memories=memories,
)
# Add to Mem0 memory if enabled
# Adding answer here because it would be much useful than input question itself
if self.mem0_memory:
self.mem0_memory.add(data=answer, agent_id=self.config.id, user_id=session_id)
# add conversation in memory
self.llm.add_history(self.config.id, input_query, answer, session_id=session_id)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="chat", properties=self._telemetry_props)
if citations:
if self.llm.config.token_usage:
return {"answer": answer, "contexts": contexts, "usage": token_info}
return answer, contexts
if self.llm.config.token_usage:
return {"answer": answer, "usage": token_info}
logger.warning(
"Starting from v0.1.125 the return type of query method will be changed to tuple containing `answer`."
)
return answer
def search(self, query, num_documents=3, where=None, raw_filter=None, namespace=None):
"""
Search for similar documents related to the query in the vector database.
Args:
query (str): The query to use.
num_documents (int, optional): Number of similar documents to fetch. Defaults to 3.
where (dict[str, any], optional): Filter criteria for the search.
raw_filter (dict[str, any], optional): Advanced raw filter criteria for the search.
namespace (str, optional): The namespace to search in. Defaults to None.
Raises:
ValueError: If both `raw_filter` and `where` are used simultaneously.
Returns:
list[dict]: A list of dictionaries, each containing the 'context' and 'metadata' of a document.
"""
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="search", properties=self._telemetry_props)
if raw_filter and where:
raise ValueError("You can't use both `raw_filter` and `where` together.")
filter_type = "raw_filter" if raw_filter else "where"
filter_criteria = raw_filter if raw_filter else where
params = {
"input_query": query,
"n_results": num_documents,
"citations": True,
"app_id": self.config.id,
"namespace": namespace,
filter_type: filter_criteria,
}
return [{"context": c[0], "metadata": c[1]} for c in self.db.query(**params)]
def set_collection_name(self, name: str):
"""
Set the name of the collection. A collection is an isolated space for vectors.
Using `app.db.set_collection_name` method is preferred to this.
:param name: Name of the collection.
:type name: str
"""
self.db.set_collection_name(name)
# Create the collection if it does not exist
self.db._get_or_create_collection(name)
# TODO: Check whether it is necessary to assign to the `self.collection` attribute,
# since the main purpose is the creation.
def reset(self):
"""
Resets the database. Deletes all embeddings irreversibly.
`App` does not have to be reinitialized after using this method.
"""
try:
self.db_session.query(DataSource).filter_by(app_id=self.config.id).delete()
self.db_session.query(ChatHistory).filter_by(app_id=self.config.id).delete()
self.db_session.commit()
except Exception as e:
logger.error(f"Error deleting data sources: {e}")
self.db_session.rollback()
return None
self.db.reset()
self.delete_all_chat_history(app_id=self.config.id)
# Send anonymous telemetry
if self.config.collect_metrics:
self.telemetry.capture(event_name="reset", properties=self._telemetry_props)
def get_history(
self,
num_rounds: int = 10,
display_format: bool = True,
session_id: Optional[str] = "default",
fetch_all: bool = False,
):
history = self.llm.memory.get(
app_id=self.config.id,
session_id=session_id,
num_rounds=num_rounds,
display_format=display_format,
fetch_all=fetch_all,
)
return history
def delete_session_chat_history(self, session_id: str = "default"):
self.llm.memory.delete(app_id=self.config.id, session_id=session_id)
self.llm.update_history(app_id=self.config.id)
def delete_all_chat_history(self, app_id: str):
self.llm.memory.delete(app_id=app_id)
self.llm.update_history(app_id=app_id)
def delete(self, source_id: str):
"""
Deletes the data from the database.
:param source_hash: The hash of the source.
:type source_hash: str
"""
try:
self.db_session.query(DataSource).filter_by(hash=source_id, app_id=self.config.id).delete()
self.db_session.commit()
except Exception as e:
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | true |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/cache.py | embedchain/embedchain/cache.py | import logging
import os # noqa: F401
from typing import Any
from gptcache import cache # noqa: F401
from gptcache.adapter.adapter import adapt # noqa: F401
from gptcache.config import Config # noqa: F401
from gptcache.manager import get_data_manager
from gptcache.manager.scalar_data.base import Answer
from gptcache.manager.scalar_data.base import DataType as CacheDataType
from gptcache.session import Session
from gptcache.similarity_evaluation.distance import ( # noqa: F401
SearchDistanceEvaluation,
)
from gptcache.similarity_evaluation.exact_match import ( # noqa: F401
ExactMatchEvaluation,
)
logger = logging.getLogger(__name__)
def gptcache_pre_function(data: dict[str, Any], **params: dict[str, Any]):
    """Extract the raw query string that GPTCache should embed and cache on."""
    query = data["input_query"]
    return query
def gptcache_data_manager(vector_dimension):
    # Build the GPTCache storage backend: SQLite for scalar data, ChromaDB for
    # vectors, LRU eviction capped at 1000 entries.
    # NOTE(review): `vector_dimension` is accepted but currently unused —
    # confirm whether it should be forwarded to get_data_manager.
    return get_data_manager(cache_base="sqlite", vector_base="chromadb", max_size=1000, eviction="LRU")
def gptcache_data_convert(cache_data):
    """Return a cache hit unchanged, logging that the cached answer was used."""
    logger.info("[Cache] Cache hit, returning cache data...")
    return cache_data
def gptcache_update_cache_callback(llm_data, update_cache_func, *args, **kwargs):
    """Store a fresh LLM answer in the cache after a miss, then return it unchanged."""
    logger.info("[Cache] Cache missed, updating cache...")
    cached_answer = Answer(llm_data, CacheDataType.STR)
    update_cache_func(cached_answer)
    return llm_data
def _gptcache_session_hit_func(cur_session_id: str, cache_session_ids: list, cache_questions: list, cache_answer: str):
return cur_session_id in cache_session_ids
def get_gptcache_session(session_id: str):
    """Create a GPTCache Session named after *session_id* with session-scoped hit checking."""
    return Session(name=session_id, check_hit_func=_gptcache_session_hit_func)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/app.py | embedchain/embedchain/app.py | import ast
import concurrent.futures
import json
import logging
import os
from typing import Any, Optional, Union
import requests
import yaml
from tqdm import tqdm
from embedchain.cache import (
Config,
ExactMatchEvaluation,
SearchDistanceEvaluation,
cache,
gptcache_data_manager,
gptcache_pre_function,
)
from embedchain.client import Client
from embedchain.config import AppConfig, CacheConfig, ChunkerConfig, Mem0Config
from embedchain.core.db.database import get_session
from embedchain.core.db.models import DataSource
from embedchain.embedchain import EmbedChain
from embedchain.embedder.base import BaseEmbedder
from embedchain.embedder.openai import OpenAIEmbedder
from embedchain.evaluation.base import BaseMetric
from embedchain.evaluation.metrics import (
AnswerRelevance,
ContextRelevance,
Groundedness,
)
from embedchain.factory import EmbedderFactory, LlmFactory, VectorDBFactory
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
from embedchain.llm.openai import OpenAILlm
from embedchain.telemetry.posthog import AnonymousTelemetry
from embedchain.utils.evaluation import EvalData, EvalMetric
from embedchain.utils.misc import validate_config
from embedchain.vectordb.base import BaseVectorDB
from embedchain.vectordb.chroma import ChromaDB
from mem0 import Memory
logger = logging.getLogger(__name__)
@register_deserializable
class App(EmbedChain):
"""
EmbedChain App lets you create a LLM powered app for your unstructured
data by defining your chosen data source, embedding model,
and vector database.
"""
    def __init__(
        self,
        id: str = None,
        name: str = None,
        config: AppConfig = None,
        db: BaseVectorDB = None,
        embedding_model: BaseEmbedder = None,
        llm: BaseLlm = None,
        config_data: dict = None,
        auto_deploy: bool = False,
        chunker: ChunkerConfig = None,
        cache_config: CacheConfig = None,
        memory_config: Mem0Config = None,
        log_level: int = logging.WARN,
    ):
        """
        Initialize a new `App` instance.

        :param id: Platform pipeline id to load an existing pipeline from, defaults to None
        :type id: str, optional
        :param name: Name of the app; mutually exclusive with `id` and `config`, defaults to None
        :type name: str, optional
        :param config: Configuration for the pipeline, defaults to None
        :type config: AppConfig, optional
        :param db: The database to use for storing and retrieving embeddings, defaults to None
        :type db: BaseVectorDB, optional
        :param embedding_model: The embedding model used to calculate embeddings, defaults to None
        :type embedding_model: BaseEmbedder, optional
        :param llm: The LLM model used to calculate embeddings, defaults to None
        :type llm: BaseLlm, optional
        :param config_data: Config dictionary, defaults to None
        :type config_data: dict, optional
        :param auto_deploy: Whether to deploy the pipeline automatically, defaults to False
        :type auto_deploy: bool, optional
        :param chunker: Chunker configuration dict, defaults to None
        :type chunker: ChunkerConfig, optional
        :param cache_config: GPTCache configuration; enables the answer cache, defaults to None
        :type cache_config: CacheConfig, optional
        :param memory_config: Mem0 configuration; enables long-term memory, defaults to None
        :type memory_config: Mem0Config, optional
        :param log_level: Logging level, defaults to logging.WARN.
            NOTE(review): currently not read anywhere in this initializer — confirm.
        :type log_level: int, optional
        :raises Exception: If an error occurs while creating the pipeline,
            or if mutually exclusive arguments are combined.
        """
        # `id`, `name` and `config`/`config_data` are mutually exclusive inputs.
        if id and config_data:
            raise Exception("Cannot provide both id and config. Please provide only one of them.")

        if id and name:
            raise Exception("Cannot provide both id and name. Please provide only one of them.")

        if name and config:
            raise Exception("Cannot provide both name and config. Please provide only one of them.")

        self.auto_deploy = auto_deploy
        # Store the dict config as an attribute to be able to send it
        self.config_data = config_data if (config_data and validate_config(config_data)) else None
        self.client = None
        # pipeline_id from the backend
        self.id = None
        self.chunker = ChunkerConfig(**chunker) if chunker else None
        self.cache_config = cache_config
        self.memory_config = memory_config

        self.config = config or AppConfig()
        self.name = self.config.name
        self.config.id = self.local_id = "default-app-id" if self.config.id is None else self.config.id

        if id is not None:
            # Init client first since user is trying to fetch the pipeline
            # details from the platform
            self._init_client()
            pipeline_details = self._get_pipeline(id)
            self.config.id = self.local_id = pipeline_details["metadata"]["local_id"]
            self.id = id

        if name is not None:
            self.name = name

        # Fall back to OpenAI/Chroma defaults for any component not supplied.
        self.embedding_model = embedding_model or OpenAIEmbedder()
        self.db = db or ChromaDB()
        self.llm = llm or OpenAILlm()
        self._init_db()

        # Session for the metadata db
        self.db_session = get_session()

        # If cache_config is provided, initializing the cache ...
        if self.cache_config is not None:
            self._init_cache()

        # If memory_config is provided, initializing the memory ...
        self.mem0_memory = None
        if self.memory_config is not None:
            self.mem0_memory = Memory()

        # Send anonymous telemetry
        self._telemetry_props = {"class": self.__class__.__name__}
        self.telemetry = AnonymousTelemetry(enabled=self.config.collect_metrics)
        self.telemetry.capture(event_name="init", properties=self._telemetry_props)

        self.user_asks = []
        if self.auto_deploy:
            self.deploy()
def _init_db(self):
"""
Initialize the database.
"""
self.db._set_embedder(self.embedding_model)
self.db._initialize()
self.db.set_collection_name(self.db.config.collection_name)
def _init_cache(self):
if self.cache_config.similarity_eval_config.strategy == "exact":
similarity_eval_func = ExactMatchEvaluation()
else:
similarity_eval_func = SearchDistanceEvaluation(
max_distance=self.cache_config.similarity_eval_config.max_distance,
positive=self.cache_config.similarity_eval_config.positive,
)
cache.init(
pre_embedding_func=gptcache_pre_function,
embedding_func=self.embedding_model.to_embeddings,
data_manager=gptcache_data_manager(vector_dimension=self.embedding_model.vector_dimension),
similarity_evaluation=similarity_eval_func,
config=Config(**self.cache_config.init_config.as_dict()),
)
def _init_client(self):
"""
Initialize the client.
"""
config = Client.load_config()
if config.get("api_key"):
self.client = Client()
else:
api_key = input(
"🔑 Enter your Embedchain API key. You can find the API key at https://app.embedchain.ai/settings/keys/ \n" # noqa: E501
)
self.client = Client(api_key=api_key)
def _get_pipeline(self, id):
"""
Get existing pipeline
"""
print("🛠️ Fetching pipeline details from the platform...")
url = f"{self.client.host}/api/v1/pipelines/{id}/cli/"
r = requests.get(
url,
headers={"Authorization": f"Token {self.client.api_key}"},
)
if r.status_code == 404:
raise Exception(f"❌ Pipeline with id {id} not found!")
print(
f"🎉 Pipeline loaded successfully! Pipeline url: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
)
return r.json()
def _create_pipeline(self):
"""
Create a pipeline on the platform.
"""
print("🛠️ Creating pipeline on the platform...")
# self.config_data is a dict. Pass it inside the key 'yaml_config' to the backend
payload = {
"yaml_config": json.dumps(self.config_data),
"name": self.name,
"local_id": self.local_id,
}
url = f"{self.client.host}/api/v1/pipelines/cli/create/"
r = requests.post(
url,
json=payload,
headers={"Authorization": f"Token {self.client.api_key}"},
)
if r.status_code not in [200, 201]:
raise Exception(f"❌ Error occurred while creating pipeline. API response: {r.text}")
if r.status_code == 200:
print(
f"🎉🎉🎉 Existing pipeline found! View your pipeline: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
) # noqa: E501
elif r.status_code == 201:
print(
f"🎉🎉🎉 Pipeline created successfully! View your pipeline: https://app.embedchain.ai/pipelines/{r.json()['id']}\n" # noqa: E501
)
return r.json()
def _get_presigned_url(self, data_type, data_value):
payload = {"data_type": data_type, "data_value": data_value}
r = requests.post(
f"{self.client.host}/api/v1/pipelines/{self.id}/cli/presigned_url/",
json=payload,
headers={"Authorization": f"Token {self.client.api_key}"},
)
r.raise_for_status()
return r.json()
def _upload_file_to_presigned_url(self, presigned_url, file_path):
try:
with open(file_path, "rb") as file:
response = requests.put(presigned_url, data=file)
response.raise_for_status()
return response.status_code == 200
except Exception as e:
logger.exception(f"Error occurred during file upload: {str(e)}")
print("❌ Error occurred during file upload!")
return False
def _upload_data_to_pipeline(self, data_type, data_value, metadata=None):
payload = {
"data_type": data_type,
"data_value": data_value,
"metadata": metadata,
}
try:
self._send_api_request(f"/api/v1/pipelines/{self.id}/cli/add/", payload)
# print the local file path if user tries to upload a local file
printed_value = metadata.get("file_path") if metadata.get("file_path") else data_value
print(f"✅ Data of type: {data_type}, value: {printed_value} added successfully.")
except Exception as e:
print(f"❌ Error occurred during data upload for type {data_type}!. Error: {str(e)}")
def _send_api_request(self, endpoint, payload):
url = f"{self.client.host}{endpoint}"
headers = {"Authorization": f"Token {self.client.api_key}"}
response = requests.post(url, json=payload, headers=headers)
response.raise_for_status()
return response
    def _process_and_upload_data(self, data_hash, data_type, data_value):
        # Local files (absolute paths) are first uploaded to S3 via a presigned
        # URL; other values (URLs, inline content) are sent to the platform as-is.
        # Returns True on success, False on any failure.
        if os.path.isabs(data_value):
            presigned_url_data = self._get_presigned_url(data_type, data_value)
            presigned_url = presigned_url_data["presigned_url"]
            s3_key = presigned_url_data["s3_key"]
            if self._upload_file_to_presigned_url(presigned_url, file_path=data_value):
                # Keep the original local path in metadata; the platform receives the URL.
                metadata = {"file_path": data_value, "s3_key": s3_key}
                data_value = presigned_url
            else:
                logger.error(f"File upload failed for hash: {data_hash}")
                return False
        else:
            if data_type == "qna_pair":
                # QnA pairs arrive as a stringified tuple; parse them back to a list.
                data_value = list(ast.literal_eval(data_value))
            metadata = {}

        try:
            self._upload_data_to_pipeline(data_type, data_value, metadata)
            # Only mark as uploaded once the platform accepted the data.
            self._mark_data_as_uploaded(data_hash)
            return True
        except Exception:
            print(f"❌ Error occurred during data upload for hash {data_hash}!")
            return False
    def _mark_data_as_uploaded(self, data_hash):
        # Flag the matching DataSource row for this app as uploaded (is_uploaded=1).
        # NOTE(review): no commit is issued here — presumably the session is
        # committed elsewhere; confirm against the session's autocommit settings.
        self.db_session.query(DataSource).filter_by(hash=data_hash, app_id=self.local_id).update({"is_uploaded": 1})
def get_data_sources(self):
data_sources = self.db_session.query(DataSource).filter_by(app_id=self.local_id).all()
results = []
for row in data_sources:
results.append({"data_type": row.type, "data_value": row.value, "metadata": row.meta_data})
return results
    def deploy(self):
        """
        Deploy the app to the Embedchain platform: create (or fetch) the
        pipeline, then upload every data source not yet marked as uploaded.
        """
        if self.client is None:
            self._init_client()

        pipeline_data = self._create_pipeline()
        self.id = pipeline_data["id"]

        # Only sources still pending upload (is_uploaded == 0).
        results = self.db_session.query(DataSource).filter_by(app_id=self.local_id, is_uploaded=0).all()
        if len(results) > 0:
            print("🛠️ Adding data to your pipeline...")
        for result in results:
            # NOTE(review): rows are read via .hash/.data_type/.data_value here,
            # while get_data_sources reads .type/.value — confirm the DataSource
            # model exposes both attribute sets.
            data_hash, data_type, data_value = result.hash, result.data_type, result.data_value
            self._process_and_upload_data(data_hash, data_type, data_value)

        # Send anonymous telemetry
        self.telemetry.capture(event_name="deploy", properties=self._telemetry_props)
    @classmethod
    def from_config(
        cls,
        config_path: Optional[str] = None,
        config: Optional[dict[str, Any]] = None,
        auto_deploy: bool = False,
        yaml_path: Optional[str] = None,
    ):
        """
        Instantiate a App object from a configuration.

        :param config_path: Path to the YAML or JSON configuration file.
        :type config_path: Optional[str]
        :param config: A dictionary containing the configuration.
        :type config: Optional[dict[str, Any]]
        :param auto_deploy: Whether to deploy the app automatically, defaults to False
        :type auto_deploy: bool, optional
        :param yaml_path: (Deprecated) Path to the YAML configuration file. Use config_path instead.
        :type yaml_path: Optional[str]
        :raises ValueError: If both config_path and config are provided, or if
            the file extension is not .yaml/.yml/.json.
        :return: An instance of the App class.
        :rtype: App
        """
        # Backward compatibility for yaml_path
        if yaml_path and not config_path:
            config_path = yaml_path

        if config_path and config:
            raise ValueError("Please provide only one of config_path or config.")

        config_data = None

        if config_path:
            # Pick the parser from the file extension.
            file_extension = os.path.splitext(config_path)[1]
            with open(config_path, "r", encoding="UTF-8") as file:
                if file_extension in [".yaml", ".yml"]:
                    config_data = yaml.safe_load(file)
                elif file_extension == ".json":
                    config_data = json.load(file)
                else:
                    raise ValueError("config_path must be a path to a YAML or JSON file.")
        elif config and isinstance(config, dict):
            config_data = config
        else:
            # No config given: warn and continue with all defaults.
            logger.error(
                "Please provide either a config file path (YAML or JSON) or a config dictionary. Falling back to defaults because no config is provided.",  # noqa: E501
            )
            config_data = {}

        # Validate the config
        validate_config(config_data)

        app_config_data = config_data.get("app", {}).get("config", {})
        vector_db_config_data = config_data.get("vectordb", {})
        # "embedder" is accepted as a legacy alias for "embedding_model".
        embedding_model_config_data = config_data.get("embedding_model", config_data.get("embedder", {}))
        memory_config_data = config_data.get("memory", {})
        llm_config_data = config_data.get("llm", {})
        chunker_config_data = config_data.get("chunker", {})
        cache_config_data = config_data.get("cache", None)

        app_config = AppConfig(**app_config_data)
        memory_config = Mem0Config(**memory_config_data) if memory_config_data else None

        # Components are built via factories; each falls back to its default provider.
        vector_db_provider = vector_db_config_data.get("provider", "chroma")
        vector_db = VectorDBFactory.create(vector_db_provider, vector_db_config_data.get("config", {}))

        if llm_config_data:
            llm_provider = llm_config_data.get("provider", "openai")
            llm = LlmFactory.create(llm_provider, llm_config_data.get("config", {}))
        else:
            llm = None

        embedding_model_provider = embedding_model_config_data.get("provider", "openai")
        embedding_model = EmbedderFactory.create(
            embedding_model_provider, embedding_model_config_data.get("config", {})
        )

        if cache_config_data is not None:
            cache_config = CacheConfig.from_config(cache_config_data)
        else:
            cache_config = None

        return cls(
            config=app_config,
            llm=llm,
            db=vector_db,
            embedding_model=embedding_model,
            config_data=config_data,
            auto_deploy=auto_deploy,
            chunker=chunker_config_data,
            cache_config=cache_config,
            memory_config=memory_config,
        )
def _eval(self, dataset: list[EvalData], metric: Union[BaseMetric, str]):
"""
Evaluate the app on a dataset for a given metric.
"""
metric_str = metric.name if isinstance(metric, BaseMetric) else metric
eval_class_map = {
EvalMetric.CONTEXT_RELEVANCY.value: ContextRelevance,
EvalMetric.ANSWER_RELEVANCY.value: AnswerRelevance,
EvalMetric.GROUNDEDNESS.value: Groundedness,
}
if metric_str in eval_class_map:
return eval_class_map[metric_str]().evaluate(dataset)
# Handle the case for custom metrics
if isinstance(metric, BaseMetric):
return metric.evaluate(dataset)
else:
raise ValueError(f"Invalid metric: {metric}")
    def evaluate(
        self,
        questions: Union[str, list[str]],
        metrics: Optional[list[Union[BaseMetric, str]]] = None,
        num_workers: int = 4,
    ):
        """
        Evaluate the app on one question or a list of questions.

        For each question the app is queried (with citations) to collect an
        answer and its supporting contexts, then every requested metric is
        computed over the resulting dataset.

        param: questions: A question or a list of questions to evaluate.
        type: questions: Union[str, list[str]]
        param: metrics: A list of metrics to evaluate. Defaults to all metrics.
        type: metrics: Optional[list[Union[BaseMetric, str]]]
        param: num_workers: Number of workers to use for parallel processing.
        type: num_workers: int
        return: A dictionary containing the evaluation results, keyed by metric name.
        rtype: dict
        """
        if "OPENAI_API_KEY" not in os.environ:
            raise ValueError("Please set the OPENAI_API_KEY environment variable with permission to use `gpt4` model.")
        queries, answers, contexts = [], [], []
        if isinstance(questions, list):
            # Answer all questions concurrently; each future maps back to its
            # originating question so results can be aligned after completion.
            with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
                future_to_data = {executor.submit(self.query, q, citations=True): q for q in questions}
                for future in tqdm(
                    concurrent.futures.as_completed(future_to_data),
                    total=len(future_to_data),
                    desc="Getting answer and contexts for questions",
                ):
                    question = future_to_data[future]
                    queries.append(question)
                    answer, context = future.result()
                    answers.append(answer)
                    # Each context entry is a (text, metadata) pair; keep only the text.
                    contexts.append(list(map(lambda x: x[0], context)))
        else:
            answer, context = self.query(questions, citations=True)
            queries = [questions]
            answers = [answer]
            contexts = [list(map(lambda x: x[0], context))]
        # Default to running every built-in metric.
        metrics = metrics or [
            EvalMetric.CONTEXT_RELEVANCY.value,
            EvalMetric.ANSWER_RELEVANCY.value,
            EvalMetric.GROUNDEDNESS.value,
        ]
        logger.info(f"Collecting data from {len(queries)} questions for evaluation...")
        dataset = []
        for q, a, c in zip(queries, answers, contexts):
            dataset.append(EvalData(question=q, answer=a, contexts=c))
        logger.info(f"Evaluating {len(dataset)} data points...")
        result = {}
        # Metrics are independent of one another, so evaluate them in parallel too.
        with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
            future_to_metric = {executor.submit(self._eval, dataset, metric): metric for metric in metrics}
            for future in tqdm(
                concurrent.futures.as_completed(future_to_metric),
                total=len(future_to_metric),
                desc="Evaluating metrics",
            ):
                metric = future_to_metric[future]
                # Custom metrics are keyed by .name; built-ins by their string value.
                if isinstance(metric, BaseMetric):
                    result[metric.name] = future.result()
                else:
                    result[metric] = future.result()
        if self.config.collect_metrics:
            telemetry_props = self._telemetry_props
            metrics_names = []
            for metric in metrics:
                if isinstance(metric, BaseMetric):
                    metrics_names.append(metric.name)
                else:
                    metrics_names.append(metric)
            telemetry_props["metrics"] = metrics_names
            self.telemetry.capture(event_name="evaluate", properties=telemetry_props)
        return result
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/lancedb.py | embedchain/embedchain/vectordb/lancedb.py | from typing import Any, Dict, List, Optional, Union
import pyarrow as pa
try:
import lancedb
except ImportError:
raise ImportError('LanceDB is required. Install with pip install "embedchain[lancedb]"') from None
from embedchain.config.vector_db.lancedb import LanceDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
@register_deserializable
class LanceDB(BaseVectorDB):
    """
    LanceDB as vector database
    """

    def __init__(
        self,
        config: Optional[LanceDBConfig] = None,
    ):
        """LanceDB as vector database.

        :param config: LanceDB database config, defaults to None
        :type config: LanceDBConfig, optional
        """
        if config:
            self.config = config
        else:
            self.config = LanceDBConfig()

        self.client = lancedb.connect(self.config.dir or "~/.lancedb")
        # True while the embedder is known to work; flipped to False by
        # _initialize() if a probe embedding fails, which switches the table
        # schema to one without a vector column.
        self.embedder_check = True

        super().__init__(config=self.config)

    def _initialize(self):
        """
        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
        """
        if not self.embedder:
            raise ValueError(
                "Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
            )
        # Probe the embedder once so ingestion can degrade gracefully to a
        # vector-less schema when embeddings cannot be produced.
        try:
            self.embedder.embedding_fn("Hello LanceDB")
        except Exception:
            self.embedder_check = False
        self._get_or_create_collection(self.config.collection_name)

    def _get_or_create_db(self):
        """
        Called during initialization. Returns the lancedb connection created in __init__.
        """
        return self.client

    def _generate_where_clause(self, where: Dict[str, Any]) -> str:
        """
        Build an SQL-style filter string (conditions joined with AND) from a
        dictionary mapping attribute names to values.

        Values are interpolated verbatim, matching the filter format the rest
        of this class passes to LanceDB.
        """
        # Bug fix: the previous implementation appended the final condition
        # twice (e.g. "a = 1 AND b = 2 AND b = 2"); joining emits each
        # condition exactly once.
        return " AND ".join(f"{key} = {value}" for key, value in where.items())

    def _get_or_create_collection(self, table_name: str, reset=False):
        """
        Get or create a named collection.

        :param table_name: Name of the collection
        :type table_name: str
        :param reset: Drop and recreate the table if True
        :type reset: bool
        :return: Created collection
        :rtype: Collection
        """
        # Without a working embedder there is no vector column to store.
        if not self.embedder_check:
            schema = pa.schema(
                [
                    pa.field("doc", pa.string()),
                    pa.field("metadata", pa.string()),
                    pa.field("id", pa.string()),
                ]
            )
        else:
            schema = pa.schema(
                [
                    pa.field("vector", pa.list_(pa.float32(), list_size=self.embedder.vector_dimension)),
                    pa.field("doc", pa.string()),
                    pa.field("metadata", pa.string()),
                    pa.field("id", pa.string()),
                ]
            )
        if not reset:
            if table_name not in self.client.table_names():
                self.collection = self.client.create_table(table_name, schema=schema)
        else:
            self.client.drop_table(table_name)
            self.collection = self.client.create_table(table_name, schema=schema)
        # Open the (possibly pre-existing) table through the connection.
        self.collection = self.client[table_name]
        return self.collection

    def get(self, ids: Optional[List[str]] = None, where: Optional[Dict[str, Any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: List[str]
        :param where: Optional. to filter data
        :type where: Dict[str, Any]
        :param limit: Optional. maximum number of documents per id lookup (defaults to 3)
        :type limit: Optional[int]
        :return: Existing documents ("ids" and "metadatas" lists).
        :rtype: Dict[str, list]
        """
        max_limit = limit if limit is not None else 3
        results = {"ids": [], "metadatas": []}

        where_clause = {}
        if where:
            where_clause = self._generate_where_clause(where)

        if ids is not None:
            # repr() quotes string ids, and joining manually avoids the
            # trailing comma a one-element tuple literal would inject
            # ("id IN ('x',)"), which is invalid filter syntax.
            id_list = ", ".join(repr(doc_id) for doc_id in ids)
            records = (
                self.collection.to_lance().scanner(filter=f"id IN ({id_list})", columns=["id"]).to_table().to_pydict()
            )
            for doc_id in records["id"]:
                search = self.collection.search(query=doc_id, vector_column_name="id")
                if where is not None:
                    search = search.where(where_clause)
                result = search.limit(max_limit).to_list()
                # Bug fix: accumulate matches across all ids instead of
                # overwriting the lists on every iteration (previously only
                # the last id's matches were returned).
                results["ids"].extend(r["id"] for r in result)
                results["metadatas"].extend(r["metadata"] for r in result)
        return results

    def add(
        self,
        documents: List[str],
        metadatas: List[object],
        ids: List[str],
    ) -> Any:
        """
        Add vectors to lancedb database

        :param documents: Documents
        :type documents: List[str]
        :param metadatas: Metadatas
        :type metadatas: List[object]
        :param ids: ids
        :type ids: List[str]
        """
        data = []
        for doc, meta, doc_id in zip(documents, metadatas, ids):
            entry = {"doc": doc, "metadata": str(meta), "id": doc_id}
            # Only compute embeddings when the embedder probe succeeded; the
            # table schema has no vector column otherwise.
            if self.embedder_check:
                entry["vector"] = self.embedder.embedding_fn([doc])[0]
            data.append(entry)
        self.collection.add(data=data)

    def _format_result(self, results) -> list:
        """
        Format LanceDB results

        :param results: LanceDB query results to format.
        :type results: QueryResult
        :return: Formatted results
        :rtype: list[tuple[Document, float]]
        """
        return results.tolist()

    def query(
        self,
        input_query: str,
        n_results: int = 3,
        where: Optional[dict[str, Any]] = None,
        raw_filter: Optional[dict[str, Any]] = None,
        citations: bool = False,
        **kwargs: Optional[dict[str, Any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        Query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: to filter data
        :type where: dict[str, Any]
        :param raw_filter: Raw filter to apply
        :type raw_filter: dict[str, Any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with its metadata (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, dict]]
        """
        if where and raw_filter:
            raise ValueError("Both `where` and `raw_filter` cannot be used together.")
        # Bug fix: the previous except-handler called the nonexistent
        # Exception.message() (raising AttributeError) and left `result`
        # unbound; let search errors propagate with their real traceback.
        query_embedding = self.embedder.embedding_fn(input_query)[0]
        results_formatted = self.collection.search(query_embedding).limit(n_results).to_list()

        contexts = []
        for result in results_formatted:
            if citations:
                contexts.append((result["doc"], result["metadata"]))
            else:
                contexts.append(result["doc"])
        return contexts

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name
        self._get_or_create_collection(self.config.collection_name)

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        return self.collection.count_rows()

    def delete(self, where):
        """Delete rows matching the given LanceDB filter expression."""
        return self.collection.delete(where=where)

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the collection and recreate the table.
        if self.config.allow_reset:
            try:
                self._get_or_create_collection(self.config.collection_name, reset=True)
            except ValueError:
                raise ValueError(
                    "For safety reasons, resetting is disabled. "
                    "Please enable it by setting `allow_reset=True` in your LanceDbConfig"
                ) from None
        else:
            print(
                "For safety reasons, resetting is disabled. "
                "Please enable it by setting `allow_reset=True` in your LanceDbConfig"
            )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/opensearch.py | embedchain/embedchain/vectordb/opensearch.py | import logging
import time
from typing import Any, Optional, Union
from tqdm import tqdm
try:
from opensearchpy import OpenSearch
from opensearchpy.helpers import bulk
except ImportError:
raise ImportError(
"OpenSearch requires extra dependencies. Install with `pip install --upgrade embedchain[opensearch]`"
) from None
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import OpenSearchVectorSearch
from embedchain.config import OpenSearchDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class OpenSearchDB(BaseVectorDB):
    """
    OpenSearch as vector database
    """

    def __init__(self, config: OpenSearchDBConfig):
        """OpenSearch as vector database.

        :param config: OpenSearch domain config
        :type config: OpenSearchDBConfig
        :raises ValueError: if no config is provided
        """
        if config is None:
            raise ValueError("OpenSearchDBConfig is required")
        self.config = config
        self.batch_size = self.config.batch_size
        self.client = OpenSearch(
            hosts=[self.config.opensearch_url],
            http_auth=self.config.http_auth,
            **self.config.extra_params,
        )
        # Eagerly verify connectivity; raises if the cluster is unreachable.
        info = self.client.info()
        logger.info(f"Connected to {info['version']['distribution']}. Version: {info['version']['number']}")
        # Remove auth credentials from config after successful connection
        # NOTE(review): the comment above says credentials are removed, but no
        # code here actually strips them from self.config — confirm intent.
        super().__init__(config=self.config)

    def _initialize(self):
        # Create the knn-enabled index if it does not exist yet. Documents are
        # stored with their raw text, metadata, and an "embeddings" knn_vector
        # field ("index": False — search uses script scoring, see query()).
        logger.info(self.client.info())
        index_name = self._get_index()
        if self.client.indices.exists(index=index_name):
            print(f"Index '{index_name}' already exists.")
            return
        index_body = {
            "settings": {"knn": True},
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "embeddings": {
                        "type": "knn_vector",
                        "index": False,
                        "dimension": self.config.vector_dimension,
                    },
                }
            },
        }
        self.client.indices.create(index_name, body=index_body)
        print(self.client.indices.get(index_name))

    def _get_or_create_db(self):
        """Called during initialization"""
        return self.client

    def _get_or_create_collection(self, name):
        """Note: nothing to return here. Discuss later"""

    def get(
        self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None
    ) -> set[str]:
        """
        Get existing doc ids present in vector database

        :param ids: _list of doc ids to check for existence
        :type ids: list[str]
        :param where: to filter data (matched as exact keyword terms on metadata fields)
        :type where: dict[str, any]
        :param limit: maximum number of hits to return
        :type limit: Optional[int]
        :return: ids
        :type: set[str]
        """
        # Build a bool query: optional ids filter plus one term clause per
        # metadata key/value pair.
        query = {}
        if ids:
            query["query"] = {"bool": {"must": [{"ids": {"values": ids}}]}}
        else:
            query["query"] = {"bool": {"must": []}}

        if where:
            for key, value in where.items():
                query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})

        # OpenSearch syntax is different from Elasticsearch
        response = self.client.search(index=self._get_index(), body=query, _source=True, size=limit)
        docs = response["hits"]["hits"]
        ids = [doc["_id"] for doc in docs]
        doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]

        # Result is modified for compatibility with other vector databases
        # TODO: Add method in vector database to return result in a standard format
        result = {"ids": ids, "metadatas": []}

        for doc_id in doc_ids:
            result["metadatas"].append({"doc_id": doc_id})

        return result

    def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
        """Adds documents to the opensearch index"""
        # Embed everything up front with the configured embedder, then bulk
        # upload in batches of self.batch_size.
        embeddings = self.embedder.embedding_fn(documents)
        for batch_start in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in opensearch"):
            batch_end = batch_start + self.batch_size
            batch_documents = documents[batch_start:batch_end]
            batch_embeddings = embeddings[batch_start:batch_end]

            # Create document entries for bulk upload
            batch_entries = [
                {
                    "_index": self._get_index(),
                    "_id": doc_id,
                    "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
                }
                for doc_id, text, metadata, embedding in zip(
                    ids[batch_start:batch_end], batch_documents, metadatas[batch_start:batch_end], batch_embeddings
                )
            ]

            # Perform bulk operation
            bulk(self.client, batch_entries, **kwargs)
            self.client.indices.refresh(index=self._get_index())

            # Sleep to avoid rate limiting
            time.sleep(0.1)

    def query(
        self,
        input_query: str,
        n_results: int,
        where: dict[str, any],
        citations: bool = False,
        **kwargs: Optional[dict[str, Any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: Optional. to filter data
        :type where: dict[str, any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        # NOTE(review): this embeds the query with a fresh OpenAIEmbeddings()
        # rather than self.embedder, while add() uses self.embedder — queries
        # and documents may be embedded by different models; confirm.
        embeddings = OpenAIEmbeddings()
        docsearch = OpenSearchVectorSearch(
            index_name=self._get_index(),
            embedding_function=embeddings,
            opensearch_url=f"{self.config.opensearch_url}",
            http_auth=self.config.http_auth,
            use_ssl=hasattr(self.config, "use_ssl") and self.config.use_ssl,
            verify_certs=hasattr(self.config, "verify_certs") and self.config.verify_certs,
        )

        # Metadata filters become keyword term clauses; default matches all.
        pre_filter = {"match_all": {}}  # default
        if len(where) > 0:
            pre_filter = {"bool": {"must": []}}
            for key, value in where.items():
                pre_filter["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})

        # Script-scored cosine similarity over the stored "embeddings" field.
        docs = docsearch.similarity_search_with_score(
            input_query,
            search_type="script_scoring",
            space_type="cosinesimil",
            vector_field="embeddings",
            text_field="text",
            metadata_field="metadata",
            pre_filter=pre_filter,
            k=n_results,
            **kwargs,
        )

        contexts = []
        for doc, score in docs:
            context = doc.page_content
            if citations:
                metadata = doc.metadata
                metadata["score"] = score
                contexts.append(tuple((context, metadata)))
            else:
                contexts.append(context)
        return contexts

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        query = {"query": {"match_all": {}}}
        response = self.client.count(index=self._get_index(), body=query)
        doc_count = response["count"]
        return doc_count

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the database
        if self.client.indices.exists(index=self._get_index()):
            # delete index in ES
            self.client.indices.delete(index=self._get_index())

    def delete(self, where):
        """Deletes a document from the OpenSearch index"""
        # Deletion is filter-based: each where key/value becomes a keyword
        # term clause, and all matching documents are removed.
        query = {"query": {"bool": {"must": []}}}
        for key, value in where.items():
            query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
        self.client.delete_by_query(index=self._get_index(), body=query)

    def _get_index(self) -> str:
        """Get the OpenSearch index for a collection

        :return: OpenSearch index
        :rtype: str
        """
        # The configured collection name doubles as the index name.
        return self.config.collection_name
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/chroma.py | embedchain/embedchain/vectordb/chroma.py | import logging
from typing import Any, Optional, Union
from chromadb import Collection, QueryResult
from langchain.docstore.document import Document
from tqdm import tqdm
from embedchain.config import ChromaDbConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
try:
import chromadb
from chromadb.config import Settings
from chromadb.errors import InvalidDimensionException
except RuntimeError:
from embedchain.utils.misc import use_pysqlite3
use_pysqlite3()
import chromadb
from chromadb.config import Settings
from chromadb.errors import InvalidDimensionException
logger = logging.getLogger(__name__)
@register_deserializable
class ChromaDB(BaseVectorDB):
    """Vector database using ChromaDB."""

    def __init__(self, config: Optional[ChromaDbConfig] = None):
        """Initialize a new ChromaDB instance

        :param config: Configuration options for Chroma, defaults to None
        :type config: Optional[ChromaDbConfig], optional
        """
        if config:
            self.config = config
        else:
            self.config = ChromaDbConfig()

        self.settings = Settings(anonymized_telemetry=False)
        self.settings.allow_reset = self.config.allow_reset if hasattr(self.config, "allow_reset") else False
        self.batch_size = self.config.batch_size
        # Explicit chroma settings from the config override the defaults above.
        if self.config.chroma_settings:
            for key, value in self.config.chroma_settings.items():
                if hasattr(self.settings, key):
                    setattr(self.settings, key, value)

        if self.config.host and self.config.port:
            # Client-server mode against a remote Chroma instance.
            logger.info(f"Connecting to ChromaDB server: {self.config.host}:{self.config.port}")
            self.settings.chroma_server_host = self.config.host
            self.settings.chroma_server_http_port = self.config.port
            self.settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
        else:
            # Local persistent mode; default storage directory is "db".
            if self.config.dir is None:
                self.config.dir = "db"

            self.settings.persist_directory = self.config.dir
            self.settings.is_persistent = True

        self.client = chromadb.Client(self.settings)
        super().__init__(config=self.config)

    def _initialize(self):
        """
        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
        """
        if not self.embedder:
            raise ValueError(
                "Embedder not set. Please set an embedder with `_set_embedder()` function before initialization."
            )
        self._get_or_create_collection(self.config.collection_name)

    def _get_or_create_db(self):
        """Called during initialization"""
        return self.client

    @staticmethod
    def _generate_where_clause(where: dict[str, any]) -> dict[str, any]:
        """Translate a flat filter dict into Chroma's where-clause format."""
        if where is None:
            return {}
        # A single filter can be passed through unchanged (no need to wrap in
        # $and based on chroma docs).
        if len(where.keys()) <= 1:
            return where
        # Bug fix: previously only string values were appended to the $and
        # list, silently dropping numeric/boolean filters whenever more than
        # one condition was supplied.
        return {"$and": [{k: v} for k, v in where.items()]}

    def _get_or_create_collection(self, name: str) -> Collection:
        """
        Get or create a named collection.

        :param name: Name of the collection
        :type name: str
        :raises ValueError: No embedder configured.
        :return: Created collection
        :rtype: Collection
        """
        if not hasattr(self, "embedder") or not self.embedder:
            raise ValueError("Cannot create a Chroma database collection without an embedder.")
        self.collection = self.client.get_or_create_collection(
            name=name,
            embedding_function=self.embedder.embedding_fn,
        )
        return self.collection

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: Optional. to filter data
        :type where: dict[str, Any]
        :param limit: Optional. maximum number of documents
        :type limit: Optional[int]
        :return: Existing documents.
        :rtype: list[str]
        """
        # Only forward the arguments that were actually provided.
        args = {}
        if ids:
            args["ids"] = ids
        if where:
            args["where"] = self._generate_where_clause(where)
        if limit:
            args["limit"] = limit
        return self.collection.get(**args)

    def add(
        self,
        documents: list[str],
        metadatas: list[object],
        ids: list[str],
        **kwargs: Optional[dict[str, Any]],
    ) -> Any:
        """
        Add vectors to chroma database

        :param documents: Documents
        :type documents: list[str]
        :param metadatas: Metadatas
        :type metadatas: list[object]
        :param ids: ids
        :type ids: list[str]
        :raises ValueError: if the three input lists disagree in length
        """
        size = len(documents)
        # Bug fix: the old check compared len(documents) against itself
        # (always false); only metadatas and ids can disagree with it.
        if len(metadatas) != size or len(ids) != size:
            raise ValueError(
                "Cannot add documents to chromadb with inconsistent sizes. Documents size: {}, Metadata size: {},"
                " Ids size: {}".format(len(documents), len(metadatas), len(ids))
            )
        # Insert in batches to stay within Chroma's per-call limits.
        # (Also removed a stray no-op `self.config` statement that followed
        # this loop in the previous version.)
        for i in tqdm(range(0, len(documents), self.batch_size), desc="Inserting batches in chromadb"):
            self.collection.add(
                documents=documents[i : i + self.batch_size],
                metadatas=metadatas[i : i + self.batch_size],
                ids=ids[i : i + self.batch_size],
            )

    @staticmethod
    def _format_result(results: QueryResult) -> list[tuple[Document, float]]:
        """
        Format Chroma results

        :param results: ChromaDB query results to format.
        :type results: QueryResult
        :return: Formatted results
        :rtype: list[tuple[Document, float]]
        """
        return [
            (Document(page_content=result[0], metadata=result[1] or {}), result[2])
            for result in zip(
                results["documents"][0],
                results["metadatas"][0],
                results["distances"][0],
            )
        ]

    def query(
        self,
        input_query: str,
        n_results: int,
        where: Optional[dict[str, any]] = None,
        raw_filter: Optional[dict[str, any]] = None,
        citations: bool = False,
        **kwargs: Optional[dict[str, any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        Query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: to filter data
        :type where: dict[str, Any]
        :param raw_filter: Raw filter to apply
        :type raw_filter: dict[str, Any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :raises InvalidDimensionException: Dimensions do not match.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        if where and raw_filter:
            raise ValueError("Both `where` and `raw_filter` cannot be used together.")
        # raw_filter is passed straight through; `where` goes through the
        # $and-wrapping translation first.
        where_clause = None
        if raw_filter:
            where_clause = raw_filter
        if where:
            where_clause = self._generate_where_clause(where)
        try:
            result = self.collection.query(
                query_texts=[
                    input_query,
                ],
                n_results=n_results,
                where=where_clause,
            )
        except InvalidDimensionException as e:
            raise InvalidDimensionException(
                e.message()
                + ". This is commonly a side-effect when an embedding function, different from the one used to add the"
                " embeddings, is used to retrieve an embedding from the database."
            ) from None
        results_formatted = self._format_result(result)
        contexts = []
        for result in results_formatted:
            context = result[0].page_content
            if citations:
                metadata = result[0].metadata
                metadata["score"] = result[1]
                contexts.append((context, metadata))
            else:
                contexts.append(context)
        return contexts

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name
        self._get_or_create_collection(self.config.collection_name)

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        return self.collection.count()

    def delete(self, where):
        """Delete entries matching the given filter dict."""
        return self.collection.delete(where=self._generate_where_clause(where))

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the collection
        try:
            self.client.delete_collection(self.config.collection_name)
        except ValueError:
            raise ValueError(
                "For safety reasons, resetting is disabled. "
                "Please enable it by setting `allow_reset=True` in your ChromaDbConfig"
            ) from None
        # Recreate
        self._get_or_create_collection(self.config.collection_name)
        # Todo: Automatically recreating a collection with the same name cannot be the best way to handle a reset.
        # A downside of this implementation is, if you have two instances,
        # the other instance will not get the updated `self.collection` attribute.
        # A better way would be to create the collection if it is called again after being reset.
        # That means, checking if collection exists in the db-consuming methods, and creating it if it doesn't.
        # That's an extra steps for all uses, just to satisfy a niche use case in a niche method. For now, this will do.
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/__init__.py | embedchain/embedchain/vectordb/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/base.py | embedchain/embedchain/vectordb/base.py | from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.helpers.json_serializable import JSONSerializable
class BaseVectorDB(JSONSerializable):
    """Common interface that every embedchain vector database implements."""

    def __init__(self, config: BaseVectorDbConfig):
        """Initialize the database. Save the config and client as an attribute.

        :param config: Database configuration class instance.
        :type config: BaseVectorDbConfig
        """
        # Subclasses typically create their client before calling super(),
        # so _get_or_create_db() usually just returns the existing one.
        self.client = self._get_or_create_db()
        self.config: BaseVectorDbConfig = config

    def _initialize(self):
        """
        Complete initialization once an embedder is available.

        The `embedder` attribute has to be injected externally via
        `_set_embedder()`, so this step cannot happen inside `__init__`
        in one step.
        """
        raise NotImplementedError

    def _get_or_create_db(self):
        """Return the underlying database client, creating it if needed."""
        raise NotImplementedError

    def _get_or_create_collection(self):
        """Return a named collection, creating it if needed."""
        raise NotImplementedError

    def _set_embedder(self, embedder: BaseEmbedder):
        """
        Persistently attach the embedder this database should use.

        :param embedder: Embedder to be set as the embedder for this database.
        :type embedder: BaseEmbedder
        """
        self.embedder = embedder

    def get(self):
        """Fetch stored database embeddings by id."""
        raise NotImplementedError

    def add(self):
        """Insert documents into the database."""
        raise NotImplementedError

    def query(self):
        """Retrieve contents from the database ranked by vector similarity."""
        raise NotImplementedError

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        raise NotImplementedError

    def reset(self):
        """Irreversibly delete every embedding in the database."""
        raise NotImplementedError

    def set_collection_name(self, name: str):
        """
        Select the collection (an isolated space for vectors) to operate on.

        :param name: Name of the collection.
        :type name: str
        """
        raise NotImplementedError

    def delete(self):
        """Remove entries from the database."""
        raise NotImplementedError
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/pinecone.py | embedchain/embedchain/vectordb/pinecone.py | import logging
import os
from typing import Optional, Union
try:
import pinecone
except ImportError:
raise ImportError(
"Pinecone requires extra dependencies. Install with `pip install pinecone-text pinecone-client`"
) from None
from pinecone_text.sparse import BM25Encoder
from embedchain.config.vector_db.pinecone import PineconeDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.utils.misc import chunks
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class PineconeDB(BaseVectorDB):
    """
    Pinecone as vector database.
    """

    def __init__(
        self,
        config: Optional[PineconeDBConfig] = None,
    ):
        """Pinecone as vector database.

        :param config: Pinecone database config, defaults to None
        :type config: PineconeDBConfig, optional
        :raises TypeError: config is not a `PineconeDBConfig` instance
        """
        if config is None:
            self.config = PineconeDBConfig()
        else:
            if not isinstance(config, PineconeDBConfig):
                raise TypeError(
                    "config is not a `PineconeDBConfig` instance. "
                    "Please make sure the type is right and that you are passing an instance."
                )
            self.config = config
        self._setup_pinecone_index()
        # Setup BM25Encoder if sparse vectors are to be used
        self.bm25_encoder = None
        self.batch_size = self.config.batch_size
        if self.config.hybrid_search:
            logger.info("Initializing BM25Encoder for sparse vectors..")
            self.bm25_encoder = self.config.bm25_encoder if self.config.bm25_encoder else BM25Encoder.default()
        # Call parent init here because embedder is needed
        super().__init__(config=self.config)

    def _initialize(self):
        """
        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.

        :raises ValueError: if no embedder has been set on this instance.
        """
        if not self.embedder:
            raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")

    def _setup_pinecone_index(self):
        """
        Loads the Pinecone index or creates it if not present.

        :raises ValueError: if no API key is configured, or neither a pod nor a
            serverless spec is available to create a missing index.
        """
        api_key = self.config.api_key or os.environ.get("PINECONE_API_KEY")
        if not api_key:
            raise ValueError("Please set the PINECONE_API_KEY environment variable or pass it in config.")
        self.client = pinecone.Pinecone(api_key=api_key, **self.config.extra_params)
        indexes = self.client.list_indexes().names()
        if indexes is None or self.config.index_name not in indexes:
            # Index creation needs an explicit deployment spec (pod or serverless).
            if self.config.pod_config:
                spec = pinecone.PodSpec(**self.config.pod_config)
            elif self.config.serverless_config:
                spec = pinecone.ServerlessSpec(**self.config.serverless_config)
            else:
                raise ValueError("No pod_config or serverless_config found.")
            self.client.create_index(
                name=self.config.index_name,
                metric=self.config.metric,
                dimension=self.config.vector_dimension,
                spec=spec,
            )
        self.pinecone_index = self.client.Index(self.config.index_name)

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: to filter data
        :type where: dict[str, any]
        :return: dict with "ids" and "metadatas" of the vectors that exist
        :rtype: dict

        NOTE(review): `where` and `limit` are accepted for interface parity with
        the other vector databases but are not applied here — only `ids` is used.
        Confirm whether metadata filtering is expected by callers.
        """
        existing_ids = list()
        metadatas = []
        if ids is not None:
            # Fetch in batches to stay under Pinecone's per-request limits.
            for i in range(0, len(ids), self.batch_size):
                result = self.pinecone_index.fetch(ids=ids[i : i + self.batch_size])
                vectors = result.get("vectors")
                batch_existing_ids = list(vectors.keys())
                existing_ids.extend(batch_existing_ids)
                # Use a distinct loop variable; the previous version shadowed the
                # `ids` parameter inside this comprehension.
                metadatas.extend([vectors.get(doc_id).get("metadata") for doc_id in batch_existing_ids])
        return {"ids": existing_ids, "metadatas": metadatas}

    def add(
        self,
        documents: list[str],
        metadatas: list[object],
        ids: list[str],
        **kwargs: Optional[dict[str, any]],
    ):
        """add data in vector database

        :param documents: list of texts to add
        :type documents: list[str]
        :param metadatas: list of metadata associated with docs
        :type metadatas: list[object]
        :param ids: ids of docs
        :type ids: list[str]
        """
        docs = []
        embeddings = self.embedder.embedding_fn(documents)
        for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
            # Insert sparse vectors as well if the user wants to do the hybrid search
            sparse_vector_dict = (
                {"sparse_values": self.bm25_encoder.encode_documents(text)} if self.bm25_encoder else {}
            )
            docs.append(
                {
                    "id": id,
                    "values": embedding,
                    # The raw text is kept in metadata so queries can return it.
                    "metadata": {**metadata, "text": text},
                    **sparse_vector_dict,
                },
            )
        for chunk in chunks(docs, self.batch_size, desc="Adding chunks in batches"):
            self.pinecone_index.upsert(chunk, **kwargs)

    def query(
        self,
        input_query: str,
        n_results: int,
        where: Optional[dict[str, any]] = None,
        raw_filter: Optional[dict[str, any]] = None,
        citations: bool = False,
        app_id: Optional[str] = None,
        **kwargs: Optional[dict[str, any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        Query contents from vector database based on vector similarity.

        Args:
            input_query (str): query string.
            n_results (int): Number of similar documents to fetch from the database.
            where (dict[str, any], optional): Filter criteria for the search.
            raw_filter (dict[str, any], optional): Advanced raw filter criteria for the search.
            citations (bool, optional): Flag to return context along with metadata. Defaults to False.
            app_id (str, optional): Application ID to be passed to Pinecone.

        Returns:
            Union[list[tuple[str, dict]], list[str]]: List of document contexts, optionally with metadata.
        """
        # `raw_filter` wins over the equality filter generated from `where`.
        query_filter = raw_filter if raw_filter is not None else self._generate_filter(where)
        if app_id:
            # NOTE(review): when `raw_filter` is supplied this mutates the
            # caller's dict in place — confirm callers do not reuse it.
            query_filter["app_id"] = {"$eq": app_id}
        query_vector = self.embedder.embedding_fn([input_query])[0]
        params = {
            "vector": query_vector,
            "filter": query_filter,
            "top_k": n_results,
            "include_metadata": True,
            **kwargs,
        }
        if self.bm25_encoder:
            # Hybrid search: send the sparse representation alongside the dense one.
            sparse_query_vector = self.bm25_encoder.encode_queries(input_query)
            params["sparse_vector"] = sparse_query_vector
        data = self.pinecone_index.query(**params)
        return [
            (metadata.get("text"), {**metadata, "score": doc.get("score")}) if citations else metadata.get("text")
            for doc in data.get("matches", [])
            for metadata in [doc.get("metadata", {})]
        ]

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        :raises TypeError: if `name` is not a string.
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        data = self.pinecone_index.describe_index_stats()
        return data["total_vector_count"]

    def _get_or_create_db(self):
        """Called during initialization; returns the Pinecone client."""
        return self.client

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the database by dropping and recreating the index.
        self.client.delete_index(self.config.index_name)
        self._setup_pinecone_index()

    @staticmethod
    def _generate_filter(where: dict):
        """Translate a simple key/value mapping into a Pinecone `$eq` filter."""
        query = {}
        if where is None:
            return query
        for k, v in where.items():
            query[k] = {"$eq": v}
        return query

    def delete(self, where: dict):
        """Delete vectors matching a metadata filter.

        :param where: metadata key/value pairs selecting the vectors to delete
        :type where: dict
        """
        # Deleting with filters is not supported for `starter` index type.
        # Follow `https://docs.pinecone.io/docs/metadata-filtering#deleting-vectors-by-metadata-filter` for more details
        db_filter = self._generate_filter(where)
        try:
            self.pinecone_index.delete(filter=db_filter)
        except Exception as e:
            # Log (instead of print) so failures reach application logs;
            # deletion remains best-effort, as before.
            logger.error(f"Failed to delete from Pinecone: {e}")
            return
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/weaviate.py | embedchain/embedchain/vectordb/weaviate.py | import copy
import os
from typing import Optional, Union
try:
import weaviate
except ImportError:
raise ImportError(
"Weaviate requires extra dependencies. Install with `pip install --upgrade 'embedchain[weaviate]'`"
) from None
from embedchain.config.vector_db.weaviate import WeaviateDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
@register_deserializable
class WeaviateDB(BaseVectorDB):
    """
    Weaviate as vector database.

    Documents are stored in one Weaviate class plus a companion
    "<index>_metadata" class, linked via the `metadata` cross-reference, so
    that metadata keys can be filtered through GraphQL.
    """

    def __init__(
        self,
        config: Optional[WeaviateDBConfig] = None,
    ):
        """Weaviate as vector database.

        :param config: Weaviate database config, defaults to None
        :type config: WeaviateDBConfig, optional
        :raises TypeError: config is not a `WeaviateDBConfig` instance
        """
        if config is None:
            self.config = WeaviateDBConfig()
        else:
            if not isinstance(config, WeaviateDBConfig):
                raise TypeError(
                    "config is not a `WeaviateDBConfig` instance. "
                    "Please make sure the type is right and that you are passing an instance."
                )
            self.config = config
        self.batch_size = self.config.batch_size
        # Connection details come from the environment, not from the config.
        # NOTE(review): if WEAVIATE_ENDPOINT / WEAVIATE_API_KEY are unset this
        # passes None through to the client — confirm that is intended.
        self.client = weaviate.Client(
            url=os.environ.get("WEAVIATE_ENDPOINT"),
            auth_client_secret=weaviate.AuthApiKey(api_key=os.environ.get("WEAVIATE_API_KEY")),
            **self.config.extra_params,
        )
        # Since weaviate uses graphQL, we need to keep track of metadata keys added in the vectordb.
        # This is needed to filter data while querying.
        self.metadata_keys = {"data_type", "doc_id", "url", "hash", "app_id"}
        # Call parent init here because embedder is needed
        super().__init__(config=self.config)

    def _initialize(self):
        """
        Create the Weaviate schema for this collection if it does not exist.

        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.

        :raises ValueError: if no embedder has been set on this instance.
        """
        if not self.embedder:
            raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
        self.index_name = self._get_index_name()
        if not self.client.schema.exists(self.index_name):
            # id is a reserved field in Weaviate, hence we had to change the name of the id field to identifier
            # The none vectorizer is crucial as we have our own custom embedding function
            """
            TODO: wait for weaviate to add indexing on `object[]` data-type so that we can add filter while querying.
            Once that is done, change `dataType` of "metadata" field to `object[]` and update the query below.
            """
            # Two classes are created: the document class and a companion
            # "<index>_metadata" class referenced from the `metadata` property.
            class_obj = {
                "classes": [
                    {
                        "class": self.index_name,
                        "vectorizer": "none",
                        "properties": [
                            {
                                "name": "identifier",
                                "dataType": ["text"],
                            },
                            {
                                "name": "text",
                                "dataType": ["text"],
                            },
                            {
                                "name": "metadata",
                                "dataType": [self.index_name + "_metadata"],
                            },
                        ],
                    },
                    {
                        "class": self.index_name + "_metadata",
                        "vectorizer": "none",
                        "properties": [
                            {
                                "name": "data_type",
                                "dataType": ["text"],
                            },
                            {
                                "name": "doc_id",
                                "dataType": ["text"],
                            },
                            {
                                "name": "url",
                                "dataType": ["text"],
                            },
                            {
                                "name": "hash",
                                "dataType": ["text"],
                            },
                            {
                                "name": "app_id",
                                "dataType": ["text"],
                            },
                        ],
                    },
                ]
            }
            self.client.schema.create(class_obj)

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: to filter data
        :type where: dict[str, any]
        :param limit: maximum number of documents to return
        :type limit: Optional[int]
        :return: dict with matching "ids" and their "metadatas"
        :rtype: dict
        """
        # Build one GraphQL `where` operand per requested id and metadata key.
        weaviate_where_operands = []
        if ids:
            for doc_id in ids:
                weaviate_where_operands.append({"path": ["identifier"], "operator": "Equal", "valueText": doc_id})
        keys = set(where.keys() if where is not None else set())
        if len(keys) > 0:
            for key in keys:
                weaviate_where_operands.append(
                    {
                        "path": ["metadata", self.index_name + "_metadata", key],
                        "operator": "Equal",
                        "valueText": where.get(key),
                    }
                )
        if len(weaviate_where_operands) == 1:
            weaviate_where_clause = weaviate_where_operands[0]
        else:
            # NOTE(review): with no ids and no `where` this builds an "And"
            # clause with zero operands — presumably callers always supply at
            # least one filter; verify against usage.
            weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
        existing_ids = []
        metadatas = []
        # Offset-based pagination; `cursor` only records whether the previous
        # batch returned anything (it is never sent back to Weaviate).
        cursor = None
        offset = 0
        has_iterated_once = False
        query_metadata_keys = self.metadata_keys.union(keys)
        while cursor is not None or not has_iterated_once:
            has_iterated_once = True
            results = self._query_with_offset(
                self.client.query.get(
                    self.index_name,
                    [
                        "identifier",
                        weaviate.LinkTo("metadata", self.index_name + "_metadata", list(query_metadata_keys)),
                    ],
                )
                .with_where(weaviate_where_clause)
                .with_additional(["id"])
                .with_limit(limit or self.batch_size),
                offset,
            )
            fetched_results = results["data"]["Get"].get(self.index_name, [])
            if not fetched_results:
                break
            for result in fetched_results:
                existing_ids.append(result["identifier"])
                # `metadata` is a cross-reference list; the first entry is used.
                metadatas.append(result["metadata"][0])
                cursor = result["_additional"]["id"]
                offset += 1
            if limit is not None and len(existing_ids) >= limit:
                break
        return {"ids": existing_ids, "metadatas": metadatas}

    def add(self, documents: list[str], metadatas: list[object], ids: list[str], **kwargs: Optional[dict[str, any]]):
        """add data in vector database

        :param documents: list of texts to add
        :type documents: list[str]
        :param metadatas: list of metadata associated with docs
        :type metadatas: list[object]
        :param ids: ids of docs
        :type ids: list[str]
        """
        embeddings = self.embedder.embedding_fn(documents)
        self.client.batch.configure(batch_size=self.batch_size, timeout_retries=3)  # Configure batch
        with self.client.batch as batch:  # Initialize a batch process
            for id, text, metadata, embedding in zip(ids, documents, metadatas, embeddings):
                doc = {"identifier": id, "text": text}
                # The metadata object also stores the text so queries on the
                # metadata class can return it.
                updated_metadata = {"text": text}
                if metadata is not None:
                    updated_metadata.update(**metadata)
                # Both objects carry the same embedding; they are linked below.
                obj_uuid = batch.add_data_object(
                    data_object=copy.deepcopy(doc), class_name=self.index_name, vector=embedding
                )
                metadata_uuid = batch.add_data_object(
                    data_object=copy.deepcopy(updated_metadata),
                    class_name=self.index_name + "_metadata",
                    vector=embedding,
                )
                batch.add_reference(
                    obj_uuid, self.index_name, "metadata", metadata_uuid, self.index_name + "_metadata", **kwargs
                )

    def query(
        self, input_query: str, n_results: int, where: dict[str, any], citations: bool = False
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: Optional. to filter data
        :type where: dict[str, any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        query_vector = self.embedder.embedding_fn([input_query])[0]
        keys = set(where.keys() if where is not None else set())
        data_fields = ["text"]
        query_metadata_keys = self.metadata_keys.union(keys)
        if citations:
            # Only fetch the linked metadata objects when the caller wants them.
            data_fields.append(weaviate.LinkTo("metadata", self.index_name + "_metadata", list(query_metadata_keys)))
        if len(keys) > 0:
            weaviate_where_operands = []
            for key in keys:
                weaviate_where_operands.append(
                    {
                        "path": ["metadata", self.index_name + "_metadata", key],
                        "operator": "Equal",
                        "valueText": where.get(key),
                    }
                )
            if len(weaviate_where_operands) == 1:
                weaviate_where_clause = weaviate_where_operands[0]
            else:
                weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
            results = (
                self.client.query.get(self.index_name, data_fields)
                .with_where(weaviate_where_clause)
                .with_near_vector({"vector": query_vector})
                .with_limit(n_results)
                .with_additional(["distance"])
                .do()
            )
        else:
            # Same query, just without a `where` clause.
            results = (
                self.client.query.get(self.index_name, data_fields)
                .with_near_vector({"vector": query_vector})
                .with_limit(n_results)
                .with_additional(["distance"])
                .do()
            )
        if results["data"]["Get"].get(self.index_name) is None:
            return []
        docs = results["data"]["Get"].get(self.index_name)
        contexts = []
        for doc in docs:
            context = doc["text"]
            if citations:
                metadata = doc["metadata"][0]
                # NOTE(review): "score" here is Weaviate's distance (lower is
                # closer), not a similarity — confirm consumers expect that.
                score = doc["_additional"]["distance"]
                metadata["score"] = score
                contexts.append((context, metadata))
            else:
                contexts.append(context)
        return contexts

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        :raises TypeError: if `name` is not a string.
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        data = self.client.query.aggregate(self.index_name).with_meta_count().do()
        return data["data"]["Aggregate"].get(self.index_name)[0]["meta"]["count"]

    def _get_or_create_db(self):
        """Called during initialization; returns the Weaviate client."""
        return self.client

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the database: match every object whose
        # identifier matches the catch-all pattern.
        self.client.batch.delete_objects(
            self.index_name, where={"path": ["identifier"], "operator": "Like", "valueText": ".*"}
        )

    # Weaviate internally by default capitalizes the class name
    def _get_index_name(self) -> str:
        """Get the Weaviate index for a collection

        :return: Weaviate index
        :rtype: str
        """
        # Dashes are invalid in Weaviate class names, hence the replacement.
        return f"{self.config.collection_name}_{self.embedder.vector_dimension}".capitalize().replace("-", "_")

    @staticmethod
    def _query_with_offset(query, offset):
        """Apply an offset (if any) to a query builder and execute it."""
        # NOTE(review): relies on `with_offset` mutating the builder in place;
        # its return value is intentionally ignored.
        if offset:
            query.with_offset(offset)
        results = query.do()
        return results

    def _generate_query(self, where: dict):
        """Translate a key/value mapping into a Weaviate `where` clause."""
        weaviate_where_operands = []
        for key, value in where.items():
            weaviate_where_operands.append(
                {
                    "path": ["metadata", self.index_name + "_metadata", key],
                    "operator": "Equal",
                    "valueText": value,
                }
            )
        if len(weaviate_where_operands) == 1:
            weaviate_where_clause = weaviate_where_operands[0]
        else:
            weaviate_where_clause = {"operator": "And", "operands": weaviate_where_operands}
        return weaviate_where_clause

    def delete(self, where: dict):
        """Delete from database.

        :param where: to filter data
        :type where: dict[str, any]
        """
        query = self._generate_query(where)
        self.client.batch.delete_objects(self.index_name, where=query)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/elasticsearch.py | embedchain/embedchain/vectordb/elasticsearch.py | import logging
from typing import Any, Optional, Union
try:
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Elasticsearch requires extra dependencies. Install with `pip install --upgrade embedchain[elasticsearch]`"
) from None
from embedchain.config import ElasticsearchDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.utils.misc import chunks
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
@register_deserializable
class ElasticsearchDB(BaseVectorDB):
    """
    Elasticsearch as vector database.
    """

    def __init__(
        self,
        config: Optional[ElasticsearchDBConfig] = None,
        es_config: Optional[ElasticsearchDBConfig] = None,  # Backwards compatibility
    ):
        """Elasticsearch as vector database.

        :param config: Elasticsearch database config, defaults to None
        :type config: ElasticsearchDBConfig, optional
        :param es_config: `es_config` is supported as an alias for `config` (for backwards compatibility),
        defaults to None
        :type es_config: ElasticsearchDBConfig, optional
        :raises TypeError: the supplied config is not an `ElasticsearchDBConfig` instance
        :raises ValueError: neither ES_URL nor CLOUD_ID is configured
        """
        if config is None and es_config is None:
            self.config = ElasticsearchDBConfig()
        else:
            # Bug fix: validate whichever config was actually supplied. The old
            # code always type-checked `config`, so passing only the legacy
            # `es_config` alias raised a spurious TypeError.
            config = config or es_config
            if not isinstance(config, ElasticsearchDBConfig):
                raise TypeError(
                    "config is not a `ElasticsearchDBConfig` instance. "
                    "Please make sure the type is right and that you are passing an instance."
                )
            self.config = config
        if self.config.ES_URL:
            self.client = Elasticsearch(self.config.ES_URL, **self.config.ES_EXTRA_PARAMS)
        elif self.config.CLOUD_ID:
            self.client = Elasticsearch(cloud_id=self.config.CLOUD_ID, **self.config.ES_EXTRA_PARAMS)
        else:
            raise ValueError(
                "Something is wrong with your config. Please check again - `https://docs.embedchain.ai/components/vector-databases#elasticsearch`"  # noqa: E501
            )
        self.batch_size = self.config.batch_size
        # Call parent init here because embedder is needed
        super().__init__(config=self.config)

    def _initialize(self):
        """
        Create the index (with a dense-vector mapping) if it does not exist.

        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
        """
        logger.info(self.client.info())
        index_settings = {
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "embeddings": {"type": "dense_vector", "index": False, "dims": self.embedder.vector_dimension},
                }
            }
        }
        es_index = self._get_index()
        if not self.client.indices.exists(index=es_index):
            # create index if not exist; log (rather than print) so this
            # reaches application logs like the rest of the module.
            logger.info("Creating index %s with settings %s", es_index, index_settings)
            self.client.indices.create(index=es_index, body=index_settings)

    def _get_or_create_db(self):
        """Called during initialization; returns the Elasticsearch client."""
        return self.client

    def _get_or_create_collection(self, name):
        """Note: nothing to return here. Discuss later"""

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: to filter data
        :type where: dict[str, any]
        :param limit: maximum number of documents to return
        :type limit: Optional[int]
        :return: dict with matching "ids" and their "metadatas"
        :rtype: dict
        """
        if ids:
            query = {"bool": {"must": [{"ids": {"values": ids}}]}}
        else:
            query = {"bool": {"must": []}}
        if where:
            for key, value in where.items():
                # Exact matches require the `.keyword` sub-field of text fields.
                query["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
        response = self.client.search(index=self._get_index(), query=query, _source=True, size=limit)
        docs = response["hits"]["hits"]
        ids = [doc["_id"] for doc in docs]
        doc_ids = [doc["_source"]["metadata"]["doc_id"] for doc in docs]
        # Result is modified for compatibility with other vector databases
        # TODO: Add method in vector database to return result in a standard format
        result = {"ids": ids, "metadatas": []}
        for doc_id in doc_ids:
            result["metadatas"].append({"doc_id": doc_id})
        return result

    def add(
        self,
        documents: list[str],
        metadatas: list[object],
        ids: list[str],
        **kwargs: Optional[dict[str, any]],
    ) -> Any:
        """
        add data in vector database

        :param documents: list of texts to add
        :type documents: list[str]
        :param metadatas: list of metadata associated with docs
        :type metadatas: list[object]
        :param ids: ids of docs
        :type ids: list[str]
        """
        embeddings = self.embedder.embedding_fn(documents)
        for chunk in chunks(
            list(zip(ids, documents, metadatas, embeddings)),
            self.batch_size,
            desc="Inserting batches in elasticsearch",
        ):  # noqa: E501
            # Build the bulk payload directly from the chunk; the previous
            # version unpacked into (shadowed) lists and re-zipped them.
            batch_docs = [
                {
                    "_index": self._get_index(),
                    "_id": id,
                    "_source": {"text": text, "metadata": metadata, "embeddings": embedding},
                }
                for id, text, metadata, embedding in chunk
            ]
            bulk(self.client, batch_docs, **kwargs)
        # Refresh so the newly indexed documents are immediately searchable.
        self.client.indices.refresh(index=self._get_index())

    def query(
        self,
        input_query: str,
        n_results: int,
        where: dict[str, any],
        citations: bool = False,
        **kwargs: Optional[dict[str, Any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: Optional. to filter data
        :type where: dict[str, any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        input_query_vector = self.embedder.embedding_fn([input_query])
        query_vector = input_query_vector[0]
        # `https://www.elastic.co/guide/en/elasticsearch/reference/7.17/query-dsl-script-score-query.html`
        # "+ 1.0" shifts cosine similarity into [0, 2] because ES scores must
        # be non-negative.
        query = {
            "script_score": {
                "query": {"bool": {"must": [{"exists": {"field": "text"}}]}},
                "script": {
                    "source": "cosineSimilarity(params.input_query_vector, 'embeddings') + 1.0",
                    "params": {"input_query_vector": query_vector},
                },
            }
        }
        if where:
            for key, value in where.items():
                query["script_score"]["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
        _source = ["text", "metadata"]
        response = self.client.search(index=self._get_index(), query=query, _source=_source, size=n_results)
        docs = response["hits"]["hits"]
        contexts = []
        for doc in docs:
            context = doc["_source"]["text"]
            if citations:
                metadata = doc["_source"]["metadata"]
                metadata["score"] = doc["_score"]
                contexts.append(tuple((context, metadata)))
            else:
                contexts.append(context)
        return contexts

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        :raises TypeError: if `name` is not a string.
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        query = {"match_all": {}}
        response = self.client.count(index=self._get_index(), query=query)
        doc_count = response["count"]
        return doc_count

    def reset(self):
        """
        Resets the database. Deletes all embeddings irreversibly.
        """
        # Delete all data from the database
        if self.client.indices.exists(index=self._get_index()):
            # delete index in Es
            self.client.indices.delete(index=self._get_index())

    def _get_index(self) -> str:
        """Get the Elasticsearch index for a collection

        :return: Elasticsearch index
        :rtype: str
        """
        # NOTE: The method is preferred to an attribute, because if collection name changes,
        # it's always up-to-date.
        return f"{self.config.collection_name}_{self.embedder.vector_dimension}".lower()

    def delete(self, where):
        """Delete documents matching a metadata filter.

        :param where: metadata key/value pairs selecting documents to delete
        :type where: dict
        """
        query = {"query": {"bool": {"must": []}}}
        for key, value in where.items():
            query["query"]["bool"]["must"].append({"term": {f"metadata.{key}.keyword": value}})
        self.client.delete_by_query(index=self._get_index(), body=query)
        # Refresh so deletions are visible to subsequent searches.
        self.client.indices.refresh(index=self._get_index())
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/zilliz.py | embedchain/embedchain/vectordb/zilliz.py | import logging
from typing import Any, Optional, Union
from embedchain.config import ZillizDBConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.vectordb.base import BaseVectorDB
try:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
MilvusClient,
connections,
utility,
)
except ImportError:
raise ImportError(
"Zilliz requires extra dependencies. Install with `pip install --upgrade embedchain[milvus]`"
) from None
logger = logging.getLogger(__name__)
@register_deserializable
class ZillizVectorDB(BaseVectorDB):
    """Zilliz (managed Milvus) as vector database."""

    def __init__(self, config: ZillizDBConfig = None):
        """Initialize the database. Save the config and client as an attribute.

        :param config: Database configuration class instance.
        :type config: ZillizDBConfig
        """
        if config is None:
            self.config = ZillizDBConfig()
        else:
            self.config = config
        self.client = MilvusClient(
            uri=self.config.uri,
            token=self.config.token,
        )
        # The ORM-style `Collection` API (used below) needs its own connection.
        self.connection = connections.connect(
            uri=self.config.uri,
            token=self.config.token,
        )
        super().__init__(config=self.config)

    def _initialize(self):
        """
        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
        So it's can't be done in __init__ in one step.
        """
        self._get_or_create_collection(self.config.collection_name)

    def _get_or_create_db(self):
        """Get or create the database."""
        return self.client

    def _get_or_create_collection(self, name):
        """
        Get or create a named collection.

        :param name: Name of the collection
        :type name: str
        :return: the loaded or newly created collection
        """
        if utility.has_collection(name):
            logger.info(f"[ZillizDB]: found an existing collection {name}, make sure the auto-id is disabled.")
            self.collection = Collection(name)
        else:
            fields = [
                FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=512),
                FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=2048),
                FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=self.embedder.vector_dimension),
                FieldSchema(name="metadata", dtype=DataType.JSON),
            ]
            schema = CollectionSchema(fields, enable_dynamic_field=True)
            self.collection = Collection(name=name, schema=schema)
            index = {
                "index_type": "AUTOINDEX",
                "metric_type": self.config.metric_type,
            }
            self.collection.create_index("embeddings", index)
        return self.collection

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: Optional. to filter data
        :type where: dict[str, Any]
        :param limit: Optional. maximum number of documents
        :type limit: Optional[int]
        :return: Existing documents.
        :rtype: dict
        """
        data_ids = []
        metadatas = []
        if self.collection.num_entities == 0 or self.collection.is_empty:
            return {"ids": data_ids, "metadatas": metadatas}
        clauses = []
        if ids:
            # Build a valid Milvus boolean expression: id in ["id1", "id2", ...].
            # The old code interpolated the Python list repr inside double
            # quotes (id in "['a', 'b']"), which is not valid filter syntax.
            quoted_ids = ", ".join(f'"{data_id}"' for data_id in ids)
            clauses.append(f"id in [{quoted_ids}]")
        if where:
            clauses.append(self._generate_zilliz_filter(where))
        # Bug fix: the old code overwrote the id clause instead of AND-ing it
        # with the metadata filter.
        filter_ = " and ".join(clauses)
        results = self.client.query(collection_name=self.config.collection_name, filter=filter_, output_fields=["*"])
        for res in results:
            data_ids.append(res.get("id"))
            metadatas.append(res.get("metadata", {}))
        return {"ids": data_ids, "metadatas": metadatas}

    def add(
        self,
        documents: list[str],
        metadatas: list[object],
        ids: list[str],
        **kwargs: Optional[dict[str, any]],
    ):
        """Add documents, their metadata and embeddings to the database.

        :param documents: list of texts to add
        :type documents: list[str]
        :param metadatas: list of metadata associated with docs
        :type metadatas: list[object]
        :param ids: ids of docs
        :type ids: list[str]
        """
        embeddings = self.embedder.embedding_fn(documents)
        for id, doc, metadata, embedding in zip(ids, documents, metadatas, embeddings):
            data = {"id": id, "text": doc, "embeddings": embedding, "metadata": metadata}
            self.client.insert(collection_name=self.config.collection_name, data=data, **kwargs)
        # Load + flush so the inserts are searchable right away.
        # NOTE(review): flushing via both the ORM collection and the client
        # looks redundant — confirm both are required.
        self.collection.load()
        self.collection.flush()
        self.client.flush(self.config.collection_name)

    def query(
        self,
        input_query: str,
        n_results: int,
        where: dict[str, Any],
        citations: bool = False,
        **kwargs: Optional[dict[str, Any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        Query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: to filter data
        :type where: dict[str, Any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        if self.collection.is_empty:
            return []
        output_fields = ["*"]
        input_query_vector = self.embedder.embedding_fn([input_query])
        query_vector = input_query_vector[0]
        query_filter = self._generate_zilliz_filter(where)
        query_result = self.client.search(
            collection_name=self.config.collection_name,
            data=[query_vector],
            filter=query_filter,
            limit=n_results,
            output_fields=output_fields,
            **kwargs,
        )
        # search() returns one result list per query vector; we sent one vector.
        query_result = query_result[0]
        contexts = []
        for query in query_result:
            data = query["entity"]
            score = query["distance"]
            context = data["text"]
            if citations:
                metadata = data.get("metadata", {})
                metadata["score"] = score
                contexts.append(tuple((context, metadata)))
            else:
                contexts.append(context)
        return contexts

    def count(self) -> int:
        """
        Count number of documents/chunks embedded in the database.

        :return: number of documents
        :rtype: int
        """
        return self.collection.num_entities

    def reset(self, collection_names: list[str] = None):
        """
        Resets the database. Deletes all embeddings irreversibly.

        :param collection_names: optional list of collections to drop; defaults
            to the configured collection.
        :type collection_names: list[str], optional
        """
        if self.config.collection_name:
            if collection_names:
                for collection_name in collection_names:
                    if collection_name in self.client.list_collections():
                        self.client.drop_collection(collection_name=collection_name)
            else:
                self.client.drop_collection(collection_name=self.config.collection_name)
                # Recreate an empty collection so the instance stays usable.
                self._get_or_create_collection(self.config.collection_name)

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        :raises TypeError: if `name` is not a string.
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name

    def _generate_zilliz_filter(self, where: dict[str, str]):
        """Translate a key/value mapping into a Milvus boolean expression over
        the JSON `metadata` field, e.g. (metadata["k"] == "v") and (...)."""
        operands = []
        for key, value in where.items():
            operands.append(f'(metadata["{key}"] == "{value}")')
        return " and ".join(operands)

    def delete(self, where: dict[str, Any]):
        """
        Delete the embeddings from DB. Zilliz only support deleting with keys.

        :param where: metadata filter used to look up the primary keys to delete
        :type where: dict[str, Any]
        """
        # Resolve the filter to primary keys first, since Zilliz deletes by key.
        data = self.get(where=where)
        keys = data.get("ids", [])
        if keys:
            self.client.delete(collection_name=self.config.collection_name, pks=keys)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/vectordb/qdrant.py | embedchain/embedchain/vectordb/qdrant.py | import copy
import os
from typing import Any, Optional, Union
try:
from qdrant_client import QdrantClient
from qdrant_client.http import models
from qdrant_client.http.models import Batch
from qdrant_client.models import Distance, VectorParams
except ImportError:
raise ImportError("Qdrant requires extra dependencies. Install with `pip install embedchain[qdrant]`") from None
from tqdm import tqdm
from embedchain.config.vector_db.qdrant import QdrantDBConfig
from embedchain.vectordb.base import BaseVectorDB
class QdrantDB(BaseVectorDB):
    """
    Qdrant as vector database
    """

    def __init__(self, config: QdrantDBConfig = None):
        """
        Qdrant as vector database

        :param config: Qdrant database config to be used for connection
        :type config: QdrantDBConfig, optional
        :raises TypeError: if ``config`` is not a ``QdrantDBConfig`` instance
        """
        if config is None:
            config = QdrantDBConfig()
        else:
            if not isinstance(config, QdrantDBConfig):
                raise TypeError(
                    "config is not a `QdrantDBConfig` instance. "
                    "Please make sure the type is right and that you are passing an instance."
                )
        self.config = config
        self.batch_size = self.config.batch_size
        # Connection parameters come from the environment, not from config.
        self.client = QdrantClient(url=os.getenv("QDRANT_URL"), api_key=os.getenv("QDRANT_API_KEY"))
        # Call parent init here because embedder is needed
        super().__init__(config=self.config)

    def _initialize(self):
        """
        This method is needed because `embedder` attribute needs to be set externally before it can be initialized.
        """
        if not self.embedder:
            raise ValueError("Embedder not set. Please set an embedder with `set_embedder` before initialization.")
        self.collection_name = self._get_or_create_collection()
        all_collections = self.client.get_collections()
        collection_names = [collection.name for collection in all_collections.collections]
        if self.collection_name not in collection_names:
            # Only reached when the collection does not exist yet, so
            # recreate_collection effectively just creates it here.
            self.client.recreate_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(
                    size=self.embedder.vector_dimension,
                    distance=Distance.COSINE,
                    hnsw_config=self.config.hnsw_config,
                    quantization_config=self.config.quantization_config,
                    on_disk=self.config.on_disk,
                ),
            )

    def _get_or_create_db(self):
        """Return the underlying Qdrant client."""
        return self.client

    def _get_or_create_collection(self):
        """Derive the collection name from config name and embedding dimension."""
        return f"{self.config.collection_name}-{self.embedder.vector_dimension}".lower().replace("_", "-")

    def get(self, ids: Optional[list[str]] = None, where: Optional[dict[str, Any]] = None, limit: Optional[int] = None):
        """
        Get existing doc ids present in vector database

        :param ids: list of doc ids to check for existence
        :type ids: list[str]
        :param where: to filter data
        :type where: dict[str, Any]
        :param limit: The maximum number of entries to be fetched
        :type limit: Optional[int], defaults to None (fetch all)
        :return: ids and metadatas of the matching documents
        :rtype: dict[str, list]
        """
        keys = set(where.keys() if where is not None else set())
        qdrant_must_filters = []
        if ids:
            qdrant_must_filters.append(
                models.FieldCondition(
                    key="identifier",
                    match=models.MatchAny(
                        any=ids,
                    ),
                )
            )
        if len(keys) > 0:
            for key in keys:
                qdrant_must_filters.append(
                    models.FieldCondition(
                        key="metadata.{}".format(key),
                        match=models.MatchValue(
                            value=where.get(key),
                        ),
                    )
                )
        offset = 0
        existing_ids = []
        metadatas = []
        while offset is not None:
            # BUGFIX: ``limit`` was accepted and documented but never used.
            # Stop once enough entries were collected and never over-fetch
            # past the limit on the final scroll page.
            page_size = self.batch_size
            if limit is not None:
                remaining = limit - len(existing_ids)
                if remaining <= 0:
                    break
                page_size = min(page_size, remaining)
            response = self.client.scroll(
                collection_name=self.collection_name,
                scroll_filter=models.Filter(must=qdrant_must_filters),
                offset=offset,
                limit=page_size,
            )
            offset = response[1]
            for doc in response[0]:
                existing_ids.append(doc.payload["identifier"])
                metadatas.append(doc.payload["metadata"])
        return {"ids": existing_ids, "metadatas": metadatas}

    def add(
        self,
        documents: list[str],
        metadatas: list[object],
        ids: list[str],
        **kwargs: Optional[dict[str, Any]],
    ):
        """add data in vector database

        :param documents: list of texts to add
        :type documents: list[str]
        :param metadatas: list of metadata associated with docs
        :type metadatas: list[object]
        :param ids: ids of docs
        :type ids: list[str]
        """
        embeddings = self.embedder.embedding_fn(documents)
        payloads = []
        qdrant_ids = []
        for id, document, metadata in zip(ids, documents, metadatas):
            metadata["text"] = document
            qdrant_ids.append(id)
            # Deep-copy so later caller-side mutations of metadata don't leak in.
            payloads.append({"identifier": id, "text": document, "metadata": copy.deepcopy(metadata)})
        for i in tqdm(range(0, len(qdrant_ids), self.batch_size), desc="Adding data in batches"):
            self.client.upsert(
                collection_name=self.collection_name,
                points=Batch(
                    ids=qdrant_ids[i : i + self.batch_size],
                    payloads=payloads[i : i + self.batch_size],
                    vectors=embeddings[i : i + self.batch_size],
                ),
                **kwargs,
            )

    def query(
        self,
        input_query: str,
        n_results: int,
        where: dict[str, Any],
        citations: bool = False,
        **kwargs: Optional[dict[str, Any]],
    ) -> Union[list[tuple[str, dict]], list[str]]:
        """
        query contents from vector database based on vector similarity

        :param input_query: query string
        :type input_query: str
        :param n_results: no of similar documents to fetch from database
        :type n_results: int
        :param where: Optional. to filter data
        :type where: dict[str, Any]
        :param citations: we use citations boolean param to return context along with the answer.
        :type citations: bool, default is False.
        :return: The content of the document that matched your query,
        along with url of the source and doc_id (if citations flag is true)
        :rtype: list[str], if citations=False, otherwise list[tuple[str, str, str]]
        """
        query_vector = self.embedder.embedding_fn([input_query])[0]
        keys = set(where.keys() if where is not None else set())
        qdrant_must_filters = []
        if len(keys) > 0:
            for key in keys:
                qdrant_must_filters.append(
                    models.FieldCondition(
                        key="metadata.{}".format(key),
                        match=models.MatchValue(
                            value=where.get(key),
                        ),
                    )
                )
        results = self.client.search(
            collection_name=self.collection_name,
            query_filter=models.Filter(must=qdrant_must_filters),
            query_vector=query_vector,
            limit=n_results,
            **kwargs,
        )
        contexts = []
        for result in results:
            context = result.payload["text"]
            if citations:
                metadata = result.payload["metadata"]
                metadata["score"] = result.score
                contexts.append(tuple((context, metadata)))
            else:
                contexts.append(context)
        return contexts

    def count(self) -> int:
        """Return the number of points stored in the collection."""
        response = self.client.get_collection(collection_name=self.collection_name)
        return response.points_count

    def reset(self):
        """Drop the collection irreversibly and recreate it empty."""
        self.client.delete_collection(collection_name=self.collection_name)
        self._initialize()

    def set_collection_name(self, name: str):
        """
        Set the name of the collection. A collection is an isolated space for vectors.

        :param name: Name of the collection.
        :type name: str
        :raises TypeError: if ``name`` is not a string
        """
        if not isinstance(name, str):
            raise TypeError("Collection name must be a string")
        self.config.collection_name = name
        self.collection_name = self._get_or_create_collection()

    @staticmethod
    def _generate_query(where: dict):
        """Translate a metadata equality dict into a Qdrant ``Filter``."""
        must_fields = []
        for key, value in where.items():
            must_fields.append(
                models.FieldCondition(
                    key=f"metadata.{key}",
                    match=models.MatchValue(
                        value=value,
                    ),
                )
            )
        return models.Filter(must=must_fields)

    def delete(self, where: dict):
        """Delete all points whose metadata matches ``where``."""
        db_filter = self._generate_query(where)
        self.client.delete(collection_name=self.collection_name, points_selector=db_filter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/whatsapp.py | embedchain/embedchain/bots/whatsapp.py | import argparse
import importlib
import logging
import signal
import sys
from embedchain.helpers.json_serializable import register_deserializable
from .base import BaseBot
logger = logging.getLogger(__name__)
@register_deserializable
class WhatsAppBot(BaseBot):
    """WhatsApp bot served over Twilio webhooks via a small Flask app."""

    def __init__(self):
        # Import lazily so the core package works without the WhatsApp extras.
        try:
            self.flask = importlib.import_module("flask")
            self.twilio = importlib.import_module("twilio")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for WhatsApp are not installed. "
                "Please install with `pip install twilio==8.5.0 flask==2.3.3`"
            ) from None
        super().__init__()

    def handle_message(self, message):
        # "add <source>" ingests data; anything else is treated as a question.
        return self.add_data(message) if message.startswith("add ") else self.ask_bot(message)

    def add_data(self, message):
        source = message.split(" ")[-1]
        try:
            self.add(source)
            return f"Added data from: {source}"
        except Exception:
            logger.exception(f"Failed to add data {source}.")
            return "Some error occurred while adding data."

    def ask_bot(self, message):
        try:
            return self.query(message)
        except Exception:
            logger.exception(f"Failed to query {message}.")
            return "An error occurred. Please try again!"

    def start(self, host="0.0.0.0", port=5000, debug=True):
        """Run the Flask webhook server until interrupted."""
        app = self.flask.Flask(__name__)

        def signal_handler(sig, frame):
            logger.info("\nGracefully shutting down the WhatsAppBot...")
            sys.exit(0)

        signal.signal(signal.SIGINT, signal_handler)

        @app.route("/chat", methods=["POST"])
        def chat():
            incoming_message = self.flask.request.values.get("Body", "").lower()
            twilio_reply = self.twilio.twiml.messaging_response.MessagingResponse()
            twilio_reply.message(self.handle_message(incoming_message))
            return str(twilio_reply)

        app.run(host=host, port=port, debug=debug)
def start_command():
    """CLI entry point: parse host/port flags and launch the WhatsApp bot."""
    parser = argparse.ArgumentParser(description="EmbedChain WhatsAppBot command line interface")
    parser.add_argument("--host", default="0.0.0.0", help="Host IP to bind")
    parser.add_argument("--port", default=5000, type=int, help="Port to bind")
    options = parser.parse_args()
    WhatsAppBot().start(host=options.host, port=options.port)


if __name__ == "__main__":
    start_command()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/discord.py | embedchain/embedchain/bots/discord.py | import argparse
import logging
import os
from embedchain.helpers.json_serializable import register_deserializable
from .base import BaseBot
try:
import discord
from discord import app_commands
from discord.ext import commands
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for Discord are not installed." "Please install with `pip install discord==2.3.2`"
) from None
logger = logging.getLogger(__name__)

# Message-content intent is required so the bot can read message text.
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
# Slash-command registry bound to this client; handlers below attach to it.
tree = app_commands.CommandTree(client)

# Invite link example
# https://discord.com/api/oauth2/authorize?client_id={DISCORD_CLIENT_ID}&permissions=2048&scope=bot
@register_deserializable
class DiscordBot(BaseBot):
    """Discord-facing bot; the module-level command handlers delegate to it."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def add_data(self, message):
        source = message.split(" ")[-1]
        try:
            self.add(source)
            return f"Added data from: {source}"
        except Exception:
            logger.exception(f"Failed to add data {source}.")
            return "Some error occurred while adding data."

    def ask_bot(self, message):
        try:
            return self.query(message)
        except Exception:
            logger.exception(f"Failed to query {message}.")
            return "An error occurred. Please try again!"

    def start(self):
        """Connect to Discord using the token from the environment."""
        client.run(os.environ["DISCORD_BOT_TOKEN"])
# @tree decorator cannot be used in a class. A global discord_bot is used as a workaround.
@tree.command(name="question", description="ask embedchain")
async def query_command(interaction: discord.Interaction, question: str):
    """Answer a slash-command question via the global ``discord_bot``."""
    await interaction.response.defer()
    member = client.guilds[0].get_member(client.user.id)
    logger.info(f"User: {member}, Query: {question}")
    try:
        answer = discord_bot.ask_bot(question)
        if args.include_question:
            response = f"> {question}\n\n{answer}"
        else:
            response = answer
        await interaction.followup.send(response)
    except Exception as e:
        await interaction.followup.send("An error occurred. Please try again!")
        # BUGFIX: the exception was passed as a positional logging argument with
        # no %-placeholder, so it was never rendered (logging raises a
        # formatting error instead). Use lazy %-formatting.
        logger.error("Error occurred during 'query' command: %s", e)
@tree.command(name="add", description="add new content to the embedchain database")
async def add_command(interaction: discord.Interaction, url_or_text: str):
    """Ingest a URL or raw text into the bot's database."""
    await interaction.response.defer()
    member = client.guilds[0].get_member(client.user.id)
    logger.info(f"User: {member}, Add: {url_or_text}")
    try:
        response = discord_bot.add_data(url_or_text)
        await interaction.followup.send(response)
    except Exception as e:
        await interaction.followup.send("An error occurred. Please try again!")
        # BUGFIX: render the exception via lazy %-formatting (it was previously
        # an unused positional argument and never appeared in the log message).
        logger.error("Error occurred during 'add' command: %s", e)
@tree.command(name="ping", description="Simple ping pong command")
async def ping(interaction: discord.Interaction):
    # Health-check command; ephemeral so only the invoker sees the reply.
    await interaction.response.send_message("Pong", ephemeral=True)
@tree.error
async def on_app_command_error(interaction: discord.Interaction, error: discord.app_commands.AppCommandError) -> None:
    """Global error handler for all slash commands registered on ``tree``."""
    if isinstance(error, commands.CommandNotFound):
        await interaction.followup.send("Invalid command. Please refer to the documentation for correct syntax.")
    else:
        # BUGFIX: pass the exception as a lazy %-argument; the old call supplied
        # it positionally with no placeholder, corrupting the log record.
        logger.error("Error occurred during command execution: %s", error)
@client.event
async def on_ready():
    # TODO: Sync in admin command, to not hit rate limits.
    # This might be overkill for most users, and it would require to set a guild or user id, where sync is allowed.
    # Push the registered slash commands to Discord so they show up in the UI.
    await tree.sync()
    logger.debug("Command tree synced")
    logger.info(f"Logged in as {client.user.name}")
def start_command():
    """CLI entry point: parse flags, build the global bot, connect to Discord."""
    # The decorated command handlers read these module-level globals.
    global args, discord_bot
    parser = argparse.ArgumentParser(description="EmbedChain DiscordBot command line interface")
    parser.add_argument(
        "--include-question",
        help="include question in query reply, otherwise it is hidden behind the slash command.",
        action="store_true",
    )
    args = parser.parse_args()
    discord_bot = DiscordBot()
    discord_bot.start()


if __name__ == "__main__":
    start_command()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/poe.py | embedchain/embedchain/bots/poe.py | import argparse
import logging
import os
from typing import Optional
from embedchain.helpers.json_serializable import register_deserializable
from .base import BaseBot
try:
from fastapi_poe import PoeBot, run
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for Poe are not installed." "Please install with `pip install fastapi-poe==0.0.16`"
) from None
def start_command():
    """CLI entry point for running the Poe bot server."""
    parser = argparse.ArgumentParser(description="EmbedChain PoeBot command line interface")
    parser.add_argument("--port", default=8080, type=int, help="Port to bind")
    parser.add_argument("--api-key", type=str, help="Poe API key")
    options = parser.parse_args()
    # FIXME: Arguments are automatically loaded by Poebot's ArgumentParser which causes it to fail.
    # the port argument here is also just for show, it actually works because poe has the same argument.
    run(PoeBot(), api_key=options.api_key or os.environ.get("POE_API_KEY"))
@register_deserializable
class PoeBot(BaseBot, PoeBot):
    """Embedchain bot served over the Poe protocol.

    Note: this subclass deliberately shadows the imported ``fastapi_poe.PoeBot``
    name at module level; ``start_command`` resolves ``PoeBot`` at call time and
    therefore picks up this class.
    """

    def __init__(self):
        # Number of prior messages forwarded to the LLM as conversation history.
        self.history_length = 5
        super().__init__()

    async def get_response(self, query):
        last_message = query.query[-1].content
        # BUGFIX: ``history`` must exist even when the extraction below fails;
        # previously an exception left it undefined, so the subsequent
        # handle_message() call raised NameError instead of proceeding
        # without history as the log message promises.
        history = None
        try:
            history = (
                [f"{m.role}: {m.content}" for m in query.query[-(self.history_length + 1) : -1]]
                if len(query.query) > 0
                else None
            )
        except Exception as e:
            logging.error(f"Error when processing the chat history. Message is being sent without history. Error: {e}")
        answer = self.handle_message(last_message, history)
        yield self.text_event(answer)

    def handle_message(self, message, history: Optional[list[str]] = None):
        # NOTE(review): the "/add " branch calls ``self.add_data``, which is
        # commented out below — confirm whether it should be restored.
        if message.startswith("/add "):
            response = self.add_data(message)
        else:
            response = self.ask_bot(message, history)
        return response

    # def add_data(self, message):
    #     data = message.split(" ")[-1]
    #     try:
    #         self.add(data)
    #         response = f"Added data from: {data}"
    #     except Exception:
    #         logging.exception(f"Failed to add data {data}.")
    #         response = "Some error occurred while adding data."
    #     return response

    def ask_bot(self, message, history: list[str]):
        """Answer ``message`` with the given history; never raises."""
        try:
            self.app.llm.set_history(history=history)
            response = self.query(message)
        except Exception:
            logging.exception(f"Failed to query {message}.")
            response = "An error occurred. Please try again!"
        return response

    def start(self):
        start_command()


if __name__ == "__main__":
    start_command()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/__init__.py | embedchain/embedchain/bots/__init__.py | from embedchain.bots.poe import PoeBot # noqa: F401
from embedchain.bots.whatsapp import WhatsAppBot # noqa: F401
# TODO: fix discord import
# from embedchain.bots.discord import DiscordBot
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/slack.py | embedchain/embedchain/bots/slack.py | import argparse
import logging
import os
import signal
import sys
from embedchain import App
from embedchain.helpers.json_serializable import register_deserializable
from .base import BaseBot
try:
from flask import Flask, request
from slack_sdk import WebClient
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for Slack are not installed."
"Please install with `pip install slack-sdk==3.21.3 flask==2.3.3`"
) from None
logger = logging.getLogger(__name__)

# Read once at import time; the WebClient authenticates with this token.
SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN")
@register_deserializable
class SlackBot(BaseBot):
    """Slack bot: answers ``query ...`` messages and ingests ``add ...`` sources."""

    def __init__(self):
        self.client = WebClient(token=SLACK_BOT_TOKEN)
        self.chat_bot = App()
        # Newest event seen so far; older/duplicate events are ignored.
        self.recent_message = {"ts": 0, "channel": ""}
        super().__init__()

    def handle_message(self, event_data):
        """Process a Slack event payload and reply in the originating channel.

        NOTE(review): this method returns ``None``; the HTTP handler below sends
        ``str(None)`` back to Slack, which only requires a 2xx — confirm that is
        intended.
        """
        message = event_data.get("event")
        if message and "text" in message and message.get("subtype") != "bot_message":
            text: str = message["text"]
            if float(message.get("ts")) > float(self.recent_message["ts"]):
                self.recent_message["ts"] = message["ts"]
                self.recent_message["channel"] = message["channel"]
                if text.startswith("query"):
                    _, question = text.split(" ", 1)
                    try:
                        response = self.chat_bot.chat(question)
                        self.send_slack_message(message["channel"], response)
                        logger.info("Query answered successfully!")
                    except Exception as e:
                        self.send_slack_message(message["channel"], "An error occurred. Please try again!")
                        # BUGFIX: render the exception via lazy %-formatting; it
                        # was previously an unused positional argument and never
                        # appeared in the log output.
                        logger.error("Error occurred during 'query' command: %s", e)
                elif text.startswith("add"):
                    _, data_type, url_or_text = text.split(" ", 2)
                    # Slack wraps URLs in angle brackets; strip them before ingesting.
                    if url_or_text.startswith("<") and url_or_text.endswith(">"):
                        url_or_text = url_or_text[1:-1]
                    try:
                        self.chat_bot.add(url_or_text, data_type)
                        self.send_slack_message(message["channel"], f"Added {data_type} : {url_or_text}")
                    except ValueError as e:
                        self.send_slack_message(message["channel"], f"Error: {str(e)}")
                        logger.error("Error occurred during 'add' command: %s", e)
                    except Exception as e:
                        self.send_slack_message(message["channel"], f"Failed to add {data_type} : {url_or_text}")
                        logger.error("Error occurred during 'add' command: %s", e)

    def send_slack_message(self, channel, message):
        """Post ``message`` to ``channel`` and return the Slack API response."""
        response = self.client.chat_postMessage(channel=channel, text=message)
        return response

    def start(self, host="0.0.0.0", port=5000, debug=True):
        """Run a Flask server that receives Slack event callbacks on ``/``."""
        app = Flask(__name__)

        def signal_handler(sig, frame):
            logger.info("\nGracefully shutting down the SlackBot...")
            sys.exit(0)

        signal.signal(signal.SIGINT, signal_handler)

        @app.route("/", methods=["POST"])
        def chat():
            # Slack's URL-verification handshake echoes back the challenge.
            if request.json.get("challenge"):
                return str(request.json.get("challenge"))
            response = self.handle_message(request.json)
            return str(response)

        app.run(host=host, port=port, debug=debug)
def start_command():
    """CLI entry point: parse host/port flags and launch the Slack bot."""
    parser = argparse.ArgumentParser(description="EmbedChain SlackBot command line interface")
    parser.add_argument("--host", default="0.0.0.0", help="Host IP to bind")
    parser.add_argument("--port", default=5000, type=int, help="Port to bind")
    options = parser.parse_args()
    SlackBot().start(host=options.host, port=options.port)


if __name__ == "__main__":
    start_command()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/bots/base.py | embedchain/embedchain/bots/base.py | from typing import Any
from embedchain import App
from embedchain.config import AddConfig, AppConfig, BaseLlmConfig
from embedchain.embedder.openai import OpenAIEmbedder
from embedchain.helpers.json_serializable import (
JSONSerializable,
register_deserializable,
)
from embedchain.llm.openai import OpenAILlm
from embedchain.vectordb.chroma import ChromaDB
@register_deserializable
class BaseBot(JSONSerializable):
    """Common base for all chat bots; wraps an embedchain ``App``."""

    def __init__(self):
        self.app = App(config=AppConfig(), llm=OpenAILlm(), db=ChromaDB(), embedding_model=OpenAIEmbedder())

    def add(self, data: Any, config: AddConfig = None):
        """
        Add data to the bot (to the vector database).
        Auto-detects type only, so some data types might not be usable.

        :param data: data to embed
        :type data: Any
        :param config: configuration class instance, defaults to None
        :type config: AddConfig, optional
        """
        config = config if config else AddConfig()
        self.app.add(data, config=config)

    def query(self, query: str, config: BaseLlmConfig = None) -> str:
        """
        Query the bot

        :param query: the user query
        :type query: str
        :param config: configuration class instance, defaults to None
        :type config: BaseLlmConfig, optional
        :return: Answer
        :rtype: str
        """
        # (Removed a redundant ``config = config`` no-op present previously.)
        return self.app.query(query, config=config)

    def start(self):
        """Start the bot's functionality."""
        raise NotImplementedError("Subclasses must implement the start method.")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/data_formatter/data_formatter.py | embedchain/embedchain/data_formatter/data_formatter.py | from importlib import import_module
from typing import Any, Optional
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config import AddConfig
from embedchain.config.add_config import ChunkerConfig, LoaderConfig
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.models.data_type import DataType
class DataFormatter(JSONSerializable):
    """
    DataFormatter is an internal utility class which abstracts the mapping for
    loaders and chunkers to the data_type entered by the user in their
    .add or .add_local method call
    """

    def __init__(
        self,
        data_type: DataType,
        config: AddConfig,
        loader: Optional[BaseLoader] = None,
        chunker: Optional[BaseChunker] = None,
    ):
        """
        Initialize a dataformatter, set data type and chunker based on datatype.

        :param data_type: The type of the data to load and chunk.
        :type data_type: DataType
        :param config: AddConfig instance with nested loader and chunker config attributes.
        :type config: AddConfig
        :param loader: Optional pre-built loader instance that overrides the default mapping.
        :type loader: Optional[BaseLoader]
        :param chunker: Optional pre-built chunker instance that overrides the default mapping.
        :type chunker: Optional[BaseChunker]
        """
        self.loader = self._get_loader(data_type=data_type, config=config.loader, loader=loader)
        self.chunker = self._get_chunker(data_type=data_type, config=config.chunker, chunker=chunker)

    @staticmethod
    def _lazy_load(module_path: str):
        # Resolve "pkg.module.ClassName" on demand, so heavy loader/chunker
        # dependencies are only imported for the data types actually used.
        module_path, class_name = module_path.rsplit(".", 1)
        module = import_module(module_path)
        return getattr(module, class_name)

    def _get_loader(
        self,
        data_type: DataType,
        config: LoaderConfig,
        loader: Optional[BaseLoader],
        **kwargs: Optional[dict[str, Any]],
    ) -> BaseLoader:
        """
        Returns the appropriate data loader for the given data type.

        :param data_type: The type of the data to load.
        :type data_type: DataType
        :param config: Config to initialize the loader with.
        :type config: LoaderConfig
        :param loader: Optional user-supplied loader instance; required for DataType.CUSTOM.
        :type loader: Optional[BaseLoader]
        :raises ValueError: If an unsupported data type is provided.
        :return: The loader for the given data type.
        :rtype: BaseLoader
        """
        # Dotted paths (not classes) so each loader module is imported lazily.
        loaders = {
            DataType.YOUTUBE_VIDEO: "embedchain.loaders.youtube_video.YoutubeVideoLoader",
            DataType.PDF_FILE: "embedchain.loaders.pdf_file.PdfFileLoader",
            DataType.WEB_PAGE: "embedchain.loaders.web_page.WebPageLoader",
            DataType.QNA_PAIR: "embedchain.loaders.local_qna_pair.LocalQnaPairLoader",
            DataType.TEXT: "embedchain.loaders.local_text.LocalTextLoader",
            DataType.DOCX: "embedchain.loaders.docx_file.DocxFileLoader",
            DataType.SITEMAP: "embedchain.loaders.sitemap.SitemapLoader",
            DataType.XML: "embedchain.loaders.xml.XmlLoader",
            DataType.DOCS_SITE: "embedchain.loaders.docs_site_loader.DocsSiteLoader",
            DataType.CSV: "embedchain.loaders.csv.CsvLoader",
            DataType.MDX: "embedchain.loaders.mdx.MdxLoader",
            DataType.IMAGE: "embedchain.loaders.image.ImageLoader",
            DataType.UNSTRUCTURED: "embedchain.loaders.unstructured_file.UnstructuredLoader",
            DataType.JSON: "embedchain.loaders.json.JSONLoader",
            DataType.OPENAPI: "embedchain.loaders.openapi.OpenAPILoader",
            DataType.GMAIL: "embedchain.loaders.gmail.GmailLoader",
            DataType.NOTION: "embedchain.loaders.notion.NotionLoader",
            DataType.SUBSTACK: "embedchain.loaders.substack.SubstackLoader",
            DataType.YOUTUBE_CHANNEL: "embedchain.loaders.youtube_channel.YoutubeChannelLoader",
            DataType.DISCORD: "embedchain.loaders.discord.DiscordLoader",
            DataType.RSSFEED: "embedchain.loaders.rss_feed.RSSFeedLoader",
            DataType.BEEHIIV: "embedchain.loaders.beehiiv.BeehiivLoader",
            DataType.GOOGLE_DRIVE: "embedchain.loaders.google_drive.GoogleDriveLoader",
            DataType.DIRECTORY: "embedchain.loaders.directory_loader.DirectoryLoader",
            DataType.SLACK: "embedchain.loaders.slack.SlackLoader",
            DataType.DROPBOX: "embedchain.loaders.dropbox.DropboxLoader",
            DataType.TEXT_FILE: "embedchain.loaders.text_file.TextFileLoader",
            DataType.EXCEL_FILE: "embedchain.loaders.excel_file.ExcelFileLoader",
            DataType.AUDIO: "embedchain.loaders.audio.AudioLoader",
        }
        if data_type == DataType.CUSTOM or loader is not None:
            # NOTE(review): despite its name and annotation, ``loader_class``
            # holds the user-supplied loader *instance* here and is returned
            # as-is (the mapped branch below returns a new instance instead).
            loader_class: type = loader
            if loader_class:
                return loader_class
        elif data_type in loaders:
            loader_class: type = self._lazy_load(loaders[data_type])
            return loader_class()
        # Reached for CUSTOM without a loader, or for an unmapped data type.
        raise ValueError(
            f"Cant find the loader for {data_type}.\
    We recommend to pass the loader to use data_type: {data_type},\
    check `https://docs.embedchain.ai/data-sources/overview`."
        )

    def _get_chunker(self, data_type: DataType, config: ChunkerConfig, chunker: Optional[BaseChunker]) -> BaseChunker:
        """Returns the appropriate chunker for the given data type (updated for lazy loading)."""
        # Dotted paths so chunker modules are imported only when selected.
        chunker_classes = {
            DataType.YOUTUBE_VIDEO: "embedchain.chunkers.youtube_video.YoutubeVideoChunker",
            DataType.PDF_FILE: "embedchain.chunkers.pdf_file.PdfFileChunker",
            DataType.WEB_PAGE: "embedchain.chunkers.web_page.WebPageChunker",
            DataType.QNA_PAIR: "embedchain.chunkers.qna_pair.QnaPairChunker",
            DataType.TEXT: "embedchain.chunkers.text.TextChunker",
            DataType.DOCX: "embedchain.chunkers.docx_file.DocxFileChunker",
            DataType.SITEMAP: "embedchain.chunkers.sitemap.SitemapChunker",
            DataType.XML: "embedchain.chunkers.xml.XmlChunker",
            DataType.DOCS_SITE: "embedchain.chunkers.docs_site.DocsSiteChunker",
            DataType.CSV: "embedchain.chunkers.table.TableChunker",
            DataType.MDX: "embedchain.chunkers.mdx.MdxChunker",
            DataType.IMAGE: "embedchain.chunkers.image.ImageChunker",
            DataType.UNSTRUCTURED: "embedchain.chunkers.unstructured_file.UnstructuredFileChunker",
            DataType.JSON: "embedchain.chunkers.json.JSONChunker",
            DataType.OPENAPI: "embedchain.chunkers.openapi.OpenAPIChunker",
            DataType.GMAIL: "embedchain.chunkers.gmail.GmailChunker",
            DataType.NOTION: "embedchain.chunkers.notion.NotionChunker",
            DataType.SUBSTACK: "embedchain.chunkers.substack.SubstackChunker",
            DataType.YOUTUBE_CHANNEL: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.DISCORD: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.CUSTOM: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.RSSFEED: "embedchain.chunkers.rss_feed.RSSFeedChunker",
            DataType.BEEHIIV: "embedchain.chunkers.beehiiv.BeehiivChunker",
            DataType.GOOGLE_DRIVE: "embedchain.chunkers.google_drive.GoogleDriveChunker",
            DataType.DIRECTORY: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.SLACK: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.DROPBOX: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.TEXT_FILE: "embedchain.chunkers.common_chunker.CommonChunker",
            DataType.EXCEL_FILE: "embedchain.chunkers.excel_file.ExcelFileChunker",
            DataType.AUDIO: "embedchain.chunkers.audio.AudioChunker",
        }
        if chunker is not None:
            # A user-supplied chunker wins over the mapping and is used as-is.
            return chunker
        elif data_type in chunker_classes:
            chunker_class = self._lazy_load(chunker_classes[data_type])
            chunker = chunker_class(config)
            chunker.set_data_type(data_type)
            return chunker
        raise ValueError(
            f"Cant find the chunker for {data_type}.\
    We recommend to pass the chunker to use data_type: {data_type},\
    check `https://docs.embedchain.ai/data-sources/overview`."
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/data_formatter/__init__.py | embedchain/embedchain/data_formatter/__init__.py | from .data_formatter import DataFormatter # noqa: F401
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/qna_pair.py | embedchain/embedchain/chunkers/qna_pair.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class QnaPairChunker(BaseChunker):
    """Chunker for QnA pair."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Default: small 300-char chunks, no overlap, character-count length.
        effective = ChunkerConfig(chunk_size=300, chunk_overlap=0, length_function=len) if config is None else config
        super().__init__(
            RecursiveCharacterTextSplitter(
                chunk_size=effective.chunk_size,
                chunk_overlap=effective.chunk_overlap,
                length_function=effective.length_function,
            )
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/image.py | embedchain/embedchain/chunkers/image.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ImageChunker(BaseChunker):
    """Chunker for Images."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Default: 2000-char chunks, no overlap, character-count length.
        effective = ChunkerConfig(chunk_size=2000, chunk_overlap=0, length_function=len) if config is None else config
        super().__init__(
            RecursiveCharacterTextSplitter(
                chunk_size=effective.chunk_size,
                chunk_overlap=effective.chunk_overlap,
                length_function=effective.length_function,
            )
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/web_page.py | embedchain/embedchain/chunkers/web_page.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class WebPageChunker(BaseChunker):
    """Chunker for web page."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Default: 2000-char chunks, no overlap, character-count length.
        effective = ChunkerConfig(chunk_size=2000, chunk_overlap=0, length_function=len) if config is None else config
        super().__init__(
            RecursiveCharacterTextSplitter(
                chunk_size=effective.chunk_size,
                chunk_overlap=effective.chunk_overlap,
                length_function=effective.length_function,
            )
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/sitemap.py | embedchain/embedchain/chunkers/sitemap.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class SitemapChunker(BaseChunker):
    """Chunker for sitemaps."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 500-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=500, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/table.py | embedchain/embedchain/chunkers/table.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
class TableChunker(BaseChunker):
    """Chunker for tabular sources such as csv, google sheets or databases."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 300-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=300, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/pdf_file.py | embedchain/embedchain/chunkers/pdf_file.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class PdfFileChunker(BaseChunker):
    """Chunker for PDF files."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/docs_site.py | embedchain/embedchain/chunkers/docs_site.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class DocsSiteChunker(BaseChunker):
    """Chunker for code documentation sites."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 500-char chunks with 50-char overlap."""
        cfg = ChunkerConfig(chunk_size=500, chunk_overlap=50, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/substack.py | embedchain/embedchain/chunkers/substack.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class SubstackChunker(BaseChunker):
    """Chunker for Substack posts."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/audio.py | embedchain/embedchain/chunkers/audio.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class AudioChunker(BaseChunker):
    """Chunker for audio transcripts."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/excel_file.py | embedchain/embedchain/chunkers/excel_file.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ExcelFileChunker(BaseChunker):
    """Chunker for Excel files."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/xml.py | embedchain/embedchain/chunkers/xml.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class XmlChunker(BaseChunker):
    """Chunker for XML files."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 500-char chunks with 50-char overlap."""
        cfg = ChunkerConfig(chunk_size=500, chunk_overlap=50, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/youtube_video.py | embedchain/embedchain/chunkers/youtube_video.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class YoutubeVideoChunker(BaseChunker):
    """Chunker for Youtube videos."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 2000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=2000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/unstructured_file.py | embedchain/embedchain/chunkers/unstructured_file.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class UnstructuredFileChunker(BaseChunker):
    """Chunker for files loaded via the unstructured loader."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/mysql.py | embedchain/embedchain/chunkers/mysql.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class MySQLChunker(BaseChunker):
    """Chunker for MySQL data."""

    # NOTE(review): the original docstring read "Chunker for json." — a
    # copy-paste error from JSONChunker; corrected to match the class name.
    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Initialize with the given config, defaulting to 1000-char chunks with no overlap."""
        if config is None:
            config = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=config.chunk_size,
            chunk_overlap=config.chunk_overlap,
            length_function=config.length_function,
        )
        super().__init__(text_splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/postgres.py | embedchain/embedchain/chunkers/postgres.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class PostgresChunker(BaseChunker):
    """Chunker for postgres data."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/docx_file.py | embedchain/embedchain/chunkers/docx_file.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class DocxFileChunker(BaseChunker):
    """Chunker for .docx files."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/beehiiv.py | embedchain/embedchain/chunkers/beehiiv.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class BeehiivChunker(BaseChunker):
    """Chunker for Beehiiv content."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/mdx.py | embedchain/embedchain/chunkers/mdx.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class MdxChunker(BaseChunker):
    """Chunker for .mdx files."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/discourse.py | embedchain/embedchain/chunkers/discourse.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class DiscourseChunker(BaseChunker):
    """Chunker for discourse content."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 1000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/base_chunker.py | embedchain/embedchain/chunkers/base_chunker.py | import hashlib
import logging
from typing import Any, Optional
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.models.data_type import DataType
logger = logging.getLogger(__name__)
class BaseChunker(JSONSerializable):
    """Base class for all chunkers.

    Wraps a text splitter and turns a loader's raw records into
    deduplicated chunks with content-addressed ids and per-chunk metadata.
    """

    def __init__(self, text_splitter):
        """Initialize the chunker.

        :param text_splitter: object exposing ``split_text(content)``; used by
            :meth:`get_chunks` to split each record's content.
        """
        self.text_splitter = text_splitter
        # Source data type; None until set_data_type() is called.
        self.data_type = None

    def create_chunks(
        self,
        loader,
        src,
        app_id=None,
        config: Optional[ChunkerConfig] = None,
        **kwargs: Optional[dict[str, Any]],
    ):
        """
        Loads data and chunks it.

        :param loader: The loader whose `load_data` method is used to create
        the raw data.
        :param src: The data to be handled by the loader. Can be a URL for
        remote sources or local content for local loaders.
        :param app_id: App id used to generate the doc_id.
        :param config: Optional chunker config; only `min_chunk_size` is read
        here — chunks shorter than it are skipped.
        :param kwargs: Forwarded verbatim to the loader's `load_data`.
        :return: dict with keys `documents`, `ids`, `metadatas`, `doc_id`.
        """
        documents = []
        chunk_ids = []
        # Tracks chunk ids already emitted so duplicate chunks are kept once.
        id_map = {}
        min_chunk_size = config.min_chunk_size if config is not None else 1
        logger.info(f"Skipping chunks smaller than {min_chunk_size} characters")
        data_result = loader.load_data(src, **kwargs)
        data_records = data_result["data"]
        doc_id = data_result["doc_id"]
        # Prefix app_id in the document id if app_id is not None to
        # distinguish between different documents stored in the same
        # elasticsearch or opensearch index
        doc_id = f"{app_id}--{doc_id}" if app_id is not None else doc_id
        metadatas = []
        for data in data_records:
            content = data["content"]
            metadata = data["meta_data"]
            # add data type to meta data to allow query using data type
            metadata["data_type"] = self.data_type.value
            metadata["doc_id"] = doc_id
            # TODO: Currently defaulting to the src as the url. This is done intentionally since some
            # of the data types like 'gmail' loader doesn't have the url in the meta data.
            url = metadata.get("url", src)
            chunks = self.get_chunks(content)
            for chunk in chunks:
                # Content-addressed id: identical chunk text + url always hash
                # to the same id, which is what makes id_map deduplicate.
                chunk_id = hashlib.sha256((chunk + url).encode()).hexdigest()
                chunk_id = f"{app_id}--{chunk_id}" if app_id is not None else chunk_id
                if id_map.get(chunk_id) is None and len(chunk) >= min_chunk_size:
                    id_map[chunk_id] = True
                    chunk_ids.append(chunk_id)
                    documents.append(chunk)
                    # NOTE(review): the same metadata dict object is appended for
                    # every chunk of a record — mutating one entry mutates all.
                    metadatas.append(metadata)
        return {
            "documents": documents,
            "ids": chunk_ids,
            "metadatas": metadatas,
            "doc_id": doc_id,
        }

    def get_chunks(self, content):
        """
        Returns chunks using text splitter instance.

        Override in child class if custom logic.
        """
        return self.text_splitter.split_text(content)

    def set_data_type(self, data_type: DataType):
        """
        set the data type of chunker
        """
        self.data_type = data_type

        # TODO: This should be done during initialization. This means it has to be done in the child classes.

    @staticmethod
    def get_word_count(documents) -> int:
        """Return the total space-delimited word count across all documents."""
        return sum(len(document.split(" ")) for document in documents)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/__init__.py | embedchain/embedchain/chunkers/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/rss_feed.py | embedchain/embedchain/chunkers/rss_feed.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class RSSFeedChunker(BaseChunker):
    """Chunker for RSS feeds."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Create the chunker; falls back to 2000-char chunks with no overlap."""
        cfg = ChunkerConfig(chunk_size=2000, chunk_overlap=0, length_function=len) if config is None else config
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=cfg.chunk_size,
            chunk_overlap=cfg.chunk_overlap,
            length_function=cfg.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/slack.py | embedchain/embedchain/chunkers/slack.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class SlackChunker(BaseChunker):
"""Chunker for postgres."""
def __init__(self, config: Optional[ChunkerConfig] = None):
if config is None:
config = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=config.chunk_size,
chunk_overlap=config.chunk_overlap,
length_function=config.length_function,
)
super().__init__(text_splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/json.py | embedchain/embedchain/chunkers/json.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class JSONChunker(BaseChunker):
"""Chunker for json."""
def __init__(self, config: Optional[ChunkerConfig] = None):
if config is None:
config = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=config.chunk_size,
chunk_overlap=config.chunk_overlap,
length_function=config.length_function,
)
super().__init__(text_splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/text.py | embedchain/embedchain/chunkers/text.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class TextChunker(BaseChunker):
"""Chunker for text."""
def __init__(self, config: Optional[ChunkerConfig] = None):
if config is None:
config = ChunkerConfig(chunk_size=300, chunk_overlap=0, length_function=len)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=config.chunk_size,
chunk_overlap=config.chunk_overlap,
length_function=config.length_function,
)
super().__init__(text_splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.