repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/google_drive.py | embedchain/embedchain/chunkers/google_drive.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class GoogleDriveChunker(BaseChunker):
    """Chunker for a Google Drive folder.

    Splits loaded Google Drive documents into chunks with a recursive
    character splitter (default: 1000-character chunks, no overlap).
    """

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Fall back to the module's default chunking parameters.
        if config is None:
            config = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
        splitter_kwargs = {
            "chunk_size": config.chunk_size,
            "chunk_overlap": config.chunk_overlap,
            "length_function": config.length_function,
        }
        super().__init__(RecursiveCharacterTextSplitter(**splitter_kwargs))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/gmail.py | embedchain/embedchain/chunkers/gmail.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class GmailChunker(BaseChunker):
    """Chunker for Gmail content.

    Uses a recursive character splitter; defaults to 1000-character
    chunks with zero overlap when no config is supplied.
    """

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Use caller-provided settings when present, defaults otherwise.
        effective = config if config is not None else ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
        super().__init__(
            RecursiveCharacterTextSplitter(
                chunk_size=effective.chunk_size,
                chunk_overlap=effective.chunk_overlap,
                length_function=effective.length_function,
            )
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/notion.py | embedchain/embedchain/chunkers/notion.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class NotionChunker(BaseChunker):
    """Chunker for Notion pages.

    Note the smaller default chunk size (300) compared to the other
    chunkers in this package.
    """

    def __init__(self, config: Optional[ChunkerConfig] = None):
        chosen_config = config
        if chosen_config is None:
            # Default: 300-character chunks, no overlap.
            chosen_config = ChunkerConfig(chunk_size=300, chunk_overlap=0, length_function=len)
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=chosen_config.chunk_size,
            chunk_overlap=chosen_config.chunk_overlap,
            length_function=chosen_config.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/common_chunker.py | embedchain/embedchain/chunkers/common_chunker.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class CommonChunker(BaseChunker):
    """Common chunker shared by all loaders without a dedicated chunker.

    Defaults to larger 2000-character chunks with no overlap.
    """

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Guard clause: synthesize a default config when none is given.
        if config is None:
            config = ChunkerConfig(chunk_size=2000, chunk_overlap=0, length_function=len)
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=config.chunk_size,
            chunk_overlap=config.chunk_overlap,
            length_function=config.length_function,
        )
        super().__init__(splitter)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/chunkers/openapi.py | embedchain/embedchain/chunkers/openapi.py | from typing import Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
class OpenAPIChunker(BaseChunker):
    """Chunker for OpenAPI specifications.

    NOTE(review): unlike the sibling chunkers, this class is not decorated
    with @register_deserializable (and the decorator is not imported in
    this module) — confirm whether it should be registered.
    """

    def __init__(self, config: Optional[ChunkerConfig] = None):
        # Default: 1000-character chunks, no overlap.
        if config is None:
            config = ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
        splitter_kwargs = {
            "chunk_size": config.chunk_size,
            "chunk_overlap": config.chunk_overlap,
            "length_function": config.length_function,
        }
        super().__init__(RecursiveCharacterTextSplitter(**splitter_kwargs))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/helpers/json_serializable.py | embedchain/embedchain/helpers/json_serializable.py | import json
import logging
from string import Template
from typing import Any, Type, TypeVar, Union
T = TypeVar("T", bound="JSONSerializable")
# NOTE: Through inheritance, all of our classes should be children of JSONSerializable. (highest level)
# NOTE: The @register_deserializable decorator should be added to all user facing child classes. (lowest level)
logger = logging.getLogger(__name__)
def register_deserializable(cls: Type[T]) -> Type[T]:
    """
    Class decorator that whitelists *cls* for deserialization.

    Only classes passed through this decorator can be reconstructed by
    ``JSONSerializable.deserialize``; this is a security measure so that
    attributes belonging to an unrelated class are never loaded.

    Example:
        @register_deserializable
        class ChildClass(JSONSerializable):
            def __init__(self, ...):
                # initialization logic
                ...

    Args:
        cls (Type): The class to be registered.

    Returns:
        Type: The same class, unchanged, after registration.
    """
    JSONSerializable._register_class_as_deserializable(cls)
    return cls
class JSONSerializable:
    """
    A class to represent a JSON serializable object.

    This class provides methods to serialize and deserialize objects,
    as well as to save serialized objects to a file and load them back.
    """

    # Registry of classes whitelisted for deserialization (shared globally
    # across all subclasses via the class attribute).
    _deserializable_classes = set()

    def serialize(self) -> str:
        """
        Serialize the object to a JSON-formatted string.

        Returns:
            str: A JSON string representation of the object, or "{}" when
            serialization fails.
        """
        try:
            return json.dumps(self, default=self._auto_encoder, ensure_ascii=False)
        except Exception as e:
            logger.error(f"Serialization error: {e}")
            return "{}"

    @classmethod
    def deserialize(cls, json_str: str) -> Any:
        """
        Deserialize a JSON-formatted string to an object.
        If it fails, a default instance of the calling class is returned instead.

        Note: This *returns* an instance, it's not automatically loaded on the calling class.

        Example:
            app = App.deserialize(json_str)

        Args:
            json_str (str): A JSON string representation of an object.

        Returns:
            Object: The deserialized object.
        """
        try:
            return json.loads(json_str, object_hook=cls._auto_decoder)
        except Exception as e:
            logger.error(f"Deserialization error: {e}")
            # Return a default instance in case of failure
            return cls()

    @staticmethod
    def _auto_encoder(obj: Any) -> Union[dict[str, Any], None]:
        """
        Automatically encode an object for JSON serialization.

        Args:
            obj (Object): The object to be encoded.

        Returns:
            dict: A dictionary representation of the object.

        Raises:
            TypeError: If the object has no ``__dict__`` and is therefore
            not serializable by this encoder.
        """
        if hasattr(obj, "__dict__"):
            dct = {}
            for key, value in obj.__dict__.items():
                try:
                    # Recursive: If the value is an instance of a subclass of
                    # JSONSerializable, serialize it with its own serialize().
                    if isinstance(value, JSONSerializable):
                        serialized_value = value.serialize()
                        # The value is stored as a (re-parsed) serialized dict.
                        dct[key] = json.loads(serialized_value)
                    # Custom rules (subclass is not json serializable by default)
                    elif isinstance(value, Template):
                        dct[key] = {"__type__": "Template", "data": value.template}
                    # Future custom types can follow the same "__type__" pattern.
                    # NOTE: Keep in mind that this logic needs to be applied to the decoder too.
                    else:
                        json.dumps(value)  # Probe: raises TypeError if not JSON serializable.
                        dct[key] = value
                except TypeError:
                    pass  # Skip attributes that cannot be serialized.
            dct["__class__"] = obj.__class__.__name__
            return dct
        raise TypeError(f"Object of type {type(obj)} is not JSON serializable")

    @classmethod
    def _auto_decoder(cls, dct: dict[str, Any]) -> Any:
        """
        Automatically decode a dictionary to an object during JSON deserialization.

        Args:
            dct (dict): The dictionary representation of an object.

        Returns:
            Object: The decoded object, or the original dictionary if decoding
            is not possible.

        Raises:
            AttributeError: If the deserialization registry is missing.
            KeyError: If the named class is not whitelisted for deserialization.
        """
        class_name = dct.pop("__class__", None)
        if class_name:
            if not hasattr(cls, "_deserializable_classes"):  # Additional safety check
                raise AttributeError(f"`{class_name}` has no registry of allowed deserializations.")
            if class_name not in {cl.__name__ for cl in cls._deserializable_classes}:
                raise KeyError(f"Deserialization of class `{class_name}` is not allowed.")
            target_class = next((cl for cl in cls._deserializable_classes if cl.__name__ == class_name), None)
            if target_class:
                obj = target_class.__new__(target_class)
                for key, value in dct.items():
                    if isinstance(value, dict) and "__type__" in value:
                        if value["__type__"] == "Template":
                            value = Template(value["data"])
                        # Future custom types can follow the same pattern.
                    default_value = getattr(target_class, key, None)
                    # Fix: fall back to the class-level default only when the
                    # stored value is None. The previous `value or default_value`
                    # silently discarded falsy stored values (0, "", False, []).
                    setattr(obj, key, default_value if value is None else value)
                return obj
        return dct

    def save_to_file(self, filename: str) -> None:
        """
        Save the serialized object to a file.

        Args:
            filename (str): The path to the file where the object should be saved.
        """
        with open(filename, "w", encoding="utf-8") as f:
            f.write(self.serialize())

    @classmethod
    def load_from_file(cls, filename: str) -> Any:
        """
        Load and deserialize an object from a file.

        Args:
            filename (str): The path to the file from which the object should be loaded.

        Returns:
            Object: The deserialized object.
        """
        with open(filename, "r", encoding="utf-8") as f:
            json_str = f.read()
        return cls.deserialize(json_str)

    @classmethod
    def _register_class_as_deserializable(cls, target_class: Type[T]) -> None:
        """
        Register a class as deserializable. This is a classmethod and globally shared.

        This method adds the target class to the set of classes that
        can be deserialized. This is a security measure to ensure only
        whitelisted classes are deserialized.

        Args:
            target_class (Type): The class to be registered.
        """
        cls._deserializable_classes.add(target_class)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/helpers/callbacks.py | embedchain/embedchain/helpers/callbacks.py | import queue
from typing import Any, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import LLMResult
STOP_ITEM = "[END]"
"""
This is a special item that is used to signal the end of the stream.
"""
class StreamingStdOutCallbackHandlerYield(StreamingStdOutCallbackHandler):
    """
    This is a callback handler that yields the tokens as they are generated.
    For a usage example, see the :func:`generate` function below.
    """

    q: queue.Queue
    """
    The queue to write the tokens to as they are generated.
    """

    def __init__(self, q: queue.Queue) -> None:
        """
        Initialize the callback handler.

        q: The queue to write the tokens to as they are generated.
        """
        super().__init__()
        self.q = q

    def on_llm_start(self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any) -> None:
        """Run when LLM starts running."""
        # Drop any tokens left over from a previous generation. Accessing
        # q.mutex / q.queue directly bypasses the public Queue API so the
        # clear happens atomically under the queue's own lock.
        with self.q.mutex:
            self.q.queue.clear()

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        self.q.put(token)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        # Sentinel tells the consumer (generate()) that the stream is over.
        self.q.put(STOP_ITEM)

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        """Run when LLM errors."""
        # Forward the error text to the consumer, then end the stream.
        self.q.put("%s: %s" % (type(error).__name__, str(error)))
        self.q.put(STOP_ITEM)
def generate(rq: queue.Queue):
    """
    Yield items from *rq* until the STOP_ITEM sentinel (or None) appears.

    Usage example:
    ```
    def askQuestion(callback_fn: StreamingStdOutCallbackHandlerYield):
        llm = OpenAI(streaming=True, callbacks=[callback_fn])
        return llm.invoke(prompt="Write a poem about a tree.")

    @app.route("/", methods=["GET"])
    def generate_output():
        q = Queue()
        callback_fn = StreamingStdOutCallbackHandlerYield(q)
        threading.Thread(target=askQuestion, args=(callback_fn,)).start()
        return Response(generate(q), mimetype="text/event-stream")
    ```
    """
    while True:
        item = rq.get()
        # A None item is treated like the sentinel and also ends the stream.
        if item is None or item == STOP_ITEM:
            break
        yield item
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/helpers/__init__.py | embedchain/embedchain/helpers/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/store/assistants.py | embedchain/embedchain/store/assistants.py | import logging
import os
import re
import tempfile
import time
import uuid
from pathlib import Path
from typing import cast
from openai import OpenAI
from openai.types.beta.threads import Message
from openai.types.beta.threads.text_content_block import TextContentBlock
from embedchain import Client, Pipeline
from embedchain.config import AddConfig
from embedchain.data_formatter import DataFormatter
from embedchain.models.data_type import DataType
from embedchain.telemetry.posthog import AnonymousTelemetry
from embedchain.utils.misc import detect_datatype
# Set up the user directory if it doesn't exist already
# (module-level side effect: runs once at import time).
Client.setup()
class OpenAIAssistant:
    """Wrapper around the OpenAI Assistants API with embedchain data loading.

    Public interface (constructor arguments and the `add`, `chat`,
    `delete_thread` methods) is unchanged from the original.
    """

    def __init__(
        self,
        name=None,
        instructions=None,
        tools=None,
        thread_id=None,
        model="gpt-4-1106-preview",
        data_sources=None,
        assistant_id=None,
        log_level=logging.INFO,
        collect_metrics=True,
    ):
        self.name = name or "OpenAI Assistant"
        self.instructions = instructions
        self.tools = tools or [{"type": "retrieval"}]
        self.model = model
        self.data_sources = data_sources or []
        self.log_level = log_level
        self._client = OpenAI()
        # Creates (or retrieves) the remote assistant, uploading any
        # initial data sources first.
        self._initialize_assistant(assistant_id)
        self.thread_id = thread_id or self._create_thread()
        self._telemetry_props = {"class": self.__class__.__name__}
        self.telemetry = AnonymousTelemetry(enabled=collect_metrics)
        self.telemetry.capture(event_name="init", properties=self._telemetry_props)

    def add(self, source, data_type=None):
        """Load *source*, upload it, and attach it to the remote assistant."""
        file_path = self._prepare_source_path(source, data_type)
        self._add_file_to_assistant(file_path)

        event_props = {
            **self._telemetry_props,
            "data_type": data_type or detect_datatype(source),
        }
        self.telemetry.capture(event_name="add", properties=event_props)
        logging.info("Data successfully added to the assistant.")

    def chat(self, message):
        """Send *message* to the thread and return the assistant's reply."""
        self._send_message(message)
        self.telemetry.capture(event_name="chat", properties=self._telemetry_props)
        return self._get_latest_response()

    def delete_thread(self):
        """Delete the current thread and start a fresh one."""
        self._client.beta.threads.delete(self.thread_id)
        self.thread_id = self._create_thread()

    # Internal methods
    def _initialize_assistant(self, assistant_id):
        """Retrieve an existing assistant or create one with uploaded files."""
        file_ids = self._generate_file_ids(self.data_sources)
        self.assistant = (
            self._client.beta.assistants.retrieve(assistant_id)
            if assistant_id
            else self._client.beta.assistants.create(
                name=self.name, model=self.model, file_ids=file_ids, instructions=self.instructions, tools=self.tools
            )
        )

    def _create_thread(self):
        """Create a new conversation thread and return its id."""
        thread = self._client.beta.threads.create()
        return thread.id

    def _prepare_source_path(self, source, data_type=None):
        """Return a local file path for *source*, loading remote data if needed."""
        if Path(source).is_file():
            return source
        data_type = data_type or detect_datatype(source)
        formatter = DataFormatter(data_type=DataType(data_type), config=AddConfig())
        data = formatter.loader.load_data(source)["data"]
        return self._save_temp_data(data=data[0]["content"].encode(), source=source)

    def _upload_file(self, file_path):
        """Upload a local file for assistant use and return its file id."""
        # Use a context manager so the handle is closed deterministically
        # (the original passed an open() result that was never closed).
        with open(file_path, "rb") as fh:
            file_obj = self._client.files.create(file=fh, purpose="assistants")
        return file_obj.id

    def _add_file_to_assistant(self, file_path):
        """Upload *file_path* and attach the resulting file to the assistant."""
        file_id = self._upload_file(file_path)
        self._client.beta.assistants.files.create(assistant_id=self.assistant.id, file_id=file_id)

    def _generate_file_ids(self, data_sources):
        """Upload the initial data sources and return their file ids.

        Fix: the original called `_add_file_to_assistant` here, which (a)
        references `self.assistant` before it exists — this runs from
        `_initialize_assistant` — and (b) returns None, so the list passed
        to `assistants.create(file_ids=...)` contained only Nones. Upload
        only, and collect the real file ids.
        """
        return [
            self._upload_file(self._prepare_source_path(ds["source"], ds.get("data_type")))
            for ds in data_sources
        ]

    def _send_message(self, message):
        """Append a user message to the thread and run it to completion."""
        self._client.beta.threads.messages.create(thread_id=self.thread_id, role="user", content=message)
        self._wait_for_completion()

    def _wait_for_completion(self):
        """Create a run for the thread and poll until it finishes.

        Raises:
            ValueError: If the run ends in the "failed" state.
        """
        run = self._client.beta.threads.runs.create(
            thread_id=self.thread_id,
            assistant_id=self.assistant.id,
            instructions=self.instructions,
        )
        run_id = run.id
        run_status = run.status
        while run_status in ["queued", "in_progress", "requires_action"]:
            time.sleep(0.1)  # Sleep before making the next API call to avoid hitting rate limits
            run = self._client.beta.threads.runs.retrieve(thread_id=self.thread_id, run_id=run_id)
            run_status = run.status
            if run_status == "failed":
                raise ValueError(f"Thread run failed with the following error: {run.last_error}")

    def _get_latest_response(self):
        """Return the newest message in the thread, formatted as text."""
        history = self._get_history()
        return self._format_message(history[0]) if history else None

    def _get_history(self):
        """Return the thread's messages, newest first."""
        messages = self._client.beta.threads.messages.list(thread_id=self.thread_id, order="desc")
        return list(messages)

    @staticmethod
    def _format_message(thread_message):
        """Join all text content blocks of a thread message into one string."""
        thread_message = cast(Message, thread_message)
        content = [c.text.value for c in thread_message.content if isinstance(c, TextContentBlock)]
        return " ".join(content)

    @staticmethod
    def _save_temp_data(data, source):
        """Write *data* bytes to a temp file named after a sanitized *source*."""
        special_chars_pattern = r'[\\/:*?"<>|&=% ]+'
        # Cap the sanitized name at 256 chars to stay within filesystem limits.
        sanitized_source = re.sub(special_chars_pattern, "_", source)[:256]
        temp_dir = tempfile.mkdtemp()
        file_path = os.path.join(temp_dir, sanitized_source)
        with open(file_path, "wb") as file:
            file.write(data)
        return file_path
class AIAssistant:
    """Local AI assistant backed by an embedchain Pipeline.

    Conversation scoping is done via (assistant_id, thread_id) metadata
    attached to every added data source; initial data sources are stored
    under the shared "global_knowledge" thread.
    """

    def __init__(
        self,
        name=None,
        instructions=None,
        yaml_path=None,
        assistant_id=None,
        thread_id=None,
        data_sources=None,
        log_level=logging.INFO,
        collect_metrics=True,
    ):
        self.name = name or "AI Assistant"
        self.data_sources = data_sources or []
        self.log_level = log_level
        self.instructions = instructions
        self.assistant_id = assistant_id or str(uuid.uuid4())
        self.thread_id = thread_id or str(uuid.uuid4())

        # Build the underlying pipeline, optionally from a YAML config file.
        self.pipeline = Pipeline.from_config(config_path=yaml_path) if yaml_path else Pipeline()
        self.pipeline.local_id = self.pipeline.config.id = self.thread_id
        if self.instructions:
            self.pipeline.system_prompt = self.instructions
        print(
            f"🎉 Created AI Assistant with name: {self.name}, assistant_id: {self.assistant_id}, thread_id: {self.thread_id}"  # noqa: E501
        )

        # telemetry related properties
        self._telemetry_props = {"class": self.__class__.__name__}
        self.telemetry = AnonymousTelemetry(enabled=collect_metrics)
        self.telemetry.capture(event_name="init", properties=self._telemetry_props)

        # Seed the pipeline with any initial data sources as shared knowledge.
        for source_spec in self.data_sources:
            seed_metadata = {"assistant_id": self.assistant_id, "thread_id": "global_knowledge"}
            self.pipeline.add(source_spec["source"], source_spec.get("data_type"), metadata=seed_metadata)

    def add(self, source, data_type=None):
        """Add a data source scoped to this assistant's current thread."""
        scoped_metadata = {"assistant_id": self.assistant_id, "thread_id": self.thread_id}
        self.pipeline.add(source, data_type=data_type, metadata=scoped_metadata)
        event_props = {
            **self._telemetry_props,
            "data_type": data_type or detect_datatype(source),
        }
        self.telemetry.capture(event_name="add", properties=event_props)

    def chat(self, query):
        """Answer *query* using this thread's data plus global knowledge."""
        scope = {
            "$and": [
                {"assistant_id": {"$eq": self.assistant_id}},
                {"thread_id": {"$in": [self.thread_id, "global_knowledge"]}},
            ]
        }
        return self.pipeline.chat(query, where=scope)

    def delete(self):
        """Reset the underlying pipeline, removing all stored data."""
        self.pipeline.reset()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/store/__init__.py | embedchain/embedchain/store/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/models/data_type.py | embedchain/embedchain/models/data_type.py | from enum import Enum
class DirectDataType(Enum):
    """
    DirectDataType enum contains data types that contain raw data directly.
    """

    # Raw text supplied inline by the caller.
    TEXT = "text"
class IndirectDataType(Enum):
    """
    IndirectDataType enum contains data types that contain references to data stored elsewhere.

    Each value is the string identifier used when adding a source of that kind.
    """

    YOUTUBE_VIDEO = "youtube_video"
    PDF_FILE = "pdf_file"
    WEB_PAGE = "web_page"
    SITEMAP = "sitemap"
    XML = "xml"
    DOCX = "docx"
    DOCS_SITE = "docs_site"
    NOTION = "notion"
    CSV = "csv"
    MDX = "mdx"
    IMAGE = "image"
    UNSTRUCTURED = "unstructured"
    JSON = "json"
    OPENAPI = "openapi"
    GMAIL = "gmail"
    SUBSTACK = "substack"
    YOUTUBE_CHANNEL = "youtube_channel"
    DISCORD = "discord"
    CUSTOM = "custom"
    RSSFEED = "rss_feed"
    BEEHIIV = "beehiiv"
    GOOGLE_DRIVE = "google_drive"
    DIRECTORY = "directory"
    SLACK = "slack"
    DROPBOX = "dropbox"
    TEXT_FILE = "text_file"
    EXCEL_FILE = "excel_file"
    AUDIO = "audio"
class SpecialDataType(Enum):
    """
    SpecialDataType enum contains data types that are neither direct nor indirect, or simply require special attention.
    """

    # Question/answer pair supplied directly by the caller.
    QNA_PAIR = "qna_pair"
class DataType(Enum):
    """Flat union of Direct, Indirect and Special data types.

    Member values mirror the source enums so callers can use a single
    enum while the three categories stay separately defined above.
    """

    TEXT = DirectDataType.TEXT.value
    YOUTUBE_VIDEO = IndirectDataType.YOUTUBE_VIDEO.value
    PDF_FILE = IndirectDataType.PDF_FILE.value
    WEB_PAGE = IndirectDataType.WEB_PAGE.value
    SITEMAP = IndirectDataType.SITEMAP.value
    XML = IndirectDataType.XML.value
    DOCX = IndirectDataType.DOCX.value
    DOCS_SITE = IndirectDataType.DOCS_SITE.value
    NOTION = IndirectDataType.NOTION.value
    CSV = IndirectDataType.CSV.value
    MDX = IndirectDataType.MDX.value
    QNA_PAIR = SpecialDataType.QNA_PAIR.value
    IMAGE = IndirectDataType.IMAGE.value
    UNSTRUCTURED = IndirectDataType.UNSTRUCTURED.value
    JSON = IndirectDataType.JSON.value
    OPENAPI = IndirectDataType.OPENAPI.value
    GMAIL = IndirectDataType.GMAIL.value
    SUBSTACK = IndirectDataType.SUBSTACK.value
    YOUTUBE_CHANNEL = IndirectDataType.YOUTUBE_CHANNEL.value
    DISCORD = IndirectDataType.DISCORD.value
    CUSTOM = IndirectDataType.CUSTOM.value
    RSSFEED = IndirectDataType.RSSFEED.value
    BEEHIIV = IndirectDataType.BEEHIIV.value
    GOOGLE_DRIVE = IndirectDataType.GOOGLE_DRIVE.value
    DIRECTORY = IndirectDataType.DIRECTORY.value
    SLACK = IndirectDataType.SLACK.value
    DROPBOX = IndirectDataType.DROPBOX.value
    TEXT_FILE = IndirectDataType.TEXT_FILE.value
    EXCEL_FILE = IndirectDataType.EXCEL_FILE.value
    AUDIO = IndirectDataType.AUDIO.value
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/models/embedding_functions.py | embedchain/embedchain/models/embedding_functions.py | from enum import Enum
class EmbeddingFunctions(Enum):
    """Identifiers for the supported embedding-function providers."""

    OPENAI = "OPENAI"
    HUGGING_FACE = "HUGGING_FACE"
    VERTEX_AI = "VERTEX_AI"
    AWS_BEDROCK = "AWS_BEDROCK"
    GPT4ALL = "GPT4ALL"
    OLLAMA = "OLLAMA"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/models/providers.py | embedchain/embedchain/models/providers.py | from enum import Enum
class Providers(Enum):
    """Identifiers for the supported LLM providers."""

    OPENAI = "OPENAI"
    # NOTE(review): both the member name ("ANTHROPHIC") and its value
    # ("ANTHPROPIC") are misspellings of "ANTHROPIC". Left unchanged here
    # because persisted configs may depend on the existing spelling —
    # confirm before renaming.
    ANTHROPHIC = "ANTHPROPIC"
    VERTEX_AI = "VERTEX_AI"
    GPT4ALL = "GPT4ALL"
    OLLAMA = "OLLAMA"
    AZURE_OPENAI = "AZURE_OPENAI"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/models/vector_dimensions.py | embedchain/embedchain/models/vector_dimensions.py | from enum import Enum
# vector length created by embedding fn
class VectorDimensions(Enum):
    """Output dimensionality (vector length) of each embedding provider."""

    GPT4ALL = 384
    OPENAI = 1536
    VERTEX_AI = 768
    HUGGING_FACE = 384
    GOOGLE_AI = 768
    MISTRAL_AI = 1024
    NVIDIA_AI = 1024
    COHERE = 384
    OLLAMA = 384
    AMAZON_TITAN_V1 = 1536
    AMAZON_TITAN_V2 = 1024
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/models/__init__.py | embedchain/embedchain/models/__init__.py | from .embedding_functions import EmbeddingFunctions # noqa: F401
from .providers import Providers # noqa: F401
from .vector_dimensions import VectorDimensions # noqa: F401
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/memory/message.py | embedchain/embedchain/memory/message.py | import logging
from typing import Any, Optional
from embedchain.helpers.json_serializable import JSONSerializable
logger = logging.getLogger(__name__)
class BaseMessage(JSONSerializable):
    """
    The base abstract message class.

    Messages are the inputs and outputs of Models.
    """

    # The string content of the message.
    content: str

    # The created_by of the message. AI, Human, Bot etc.
    created_by: str

    # Any additional info. May be None when no metadata was supplied.
    metadata: dict[str, Any]

    def __init__(self, content: str, created_by: str, metadata: Optional[dict[str, Any]] = None) -> None:
        """Store the message content, its author tag, and optional metadata."""
        super().__init__()
        self.content = content
        self.created_by = created_by
        self.metadata = metadata

    @property
    def type(self) -> str:
        """Type of the Message, used for serialization."""
        # NOTE(review): the body is empty, so this property returns None
        # despite the `str` annotation — confirm subclasses override it.

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    def __str__(self) -> str:
        # Rendered as "<created_by>: <content>".
        return f"{self.created_by}: {self.content}"
class ChatMessage(JSONSerializable):
    """
    The base abstract chat message class.

    Chat messages are the pair of (question, answer) conversation
    between human and model.
    """

    # One side of the exchange each; both default to None until added.
    human_message: Optional[BaseMessage] = None
    ai_message: Optional[BaseMessage] = None

    def add_user_message(self, message: str, metadata: Optional[dict] = None):
        """Set the human side of the pair, logging if one already existed."""
        if self.human_message:
            logger.info(
                "Human message already exists in the chat message,\
overwriting it with new message."
            )
        self.human_message = BaseMessage(content=message, created_by="human", metadata=metadata)

    def add_ai_message(self, message: str, metadata: Optional[dict] = None):
        """Set the AI side of the pair, logging if one already existed."""
        if self.ai_message:
            logger.info(
                "AI message already exists in the chat message,\
overwriting it with new message."
            )
        self.ai_message = BaseMessage(content=message, created_by="ai", metadata=metadata)

    def __str__(self) -> str:
        # Human line first, then the AI line.
        return f"{self.human_message}\n{self.ai_message}"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/memory/utils.py | embedchain/embedchain/memory/utils.py | from typing import Any, Optional
def merge_metadata_dict(left: Optional[dict[str, Any]], right: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]:
    """
    Combine the metadata dicts of a human message and an AI message.

    Strings under the same key are concatenated, nested dicts are merged
    recursively, and a type mismatch or unsupported duplicate raises.

    Args:
        left (dict[str, Any]): metadata of human message
        right (dict[str, Any]): metadata of AI message

    Returns:
        dict[str, Any]: combined metadata dict with dedup to be saved in db,
        or None when both inputs are empty.

    Raises:
        ValueError: on duplicate keys with mismatched or unmergeable types.
    """
    # Handle the empty/None edge cases first.
    if not left:
        return right if right else None
    if not right:
        return left

    combined = dict(left)
    for key, value in right.items():
        if key not in combined:
            combined[key] = value
            continue
        existing = combined[key]
        if type(existing) is not type(value):
            raise ValueError(f'additional_kwargs["{key}"] already exists in this message,' " but with a different type.")
        if isinstance(existing, str):
            combined[key] = existing + value
        elif isinstance(existing, dict):
            combined[key] = merge_metadata_dict(existing, value)
        else:
            raise ValueError(f"Additional kwargs key {key} already exists in this message.")
    return combined
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/memory/__init__.py | embedchain/embedchain/memory/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/memory/base.py | embedchain/embedchain/memory/base.py | import json
import logging
import uuid
from typing import Any, Optional
from embedchain.core.db.database import get_session
from embedchain.core.db.models import ChatHistory as ChatHistoryModel
from embedchain.memory.message import ChatMessage
from embedchain.memory.utils import merge_metadata_dict
logger = logging.getLogger(__name__)
class ChatHistory:
def __init__(self) -> None:
    # One SQLAlchemy session per ChatHistory instance.
    self.db_session = get_session()
def add(self, app_id, session_id, chat_message: ChatMessage) -> Optional[str]:
    """
    Persist one (question, answer) round of conversation.

    :param app_id: app the round belongs to
    :param session_id: conversation session the round belongs to
    :param chat_message: the ChatMessage pair to store
    :return: the generated row id, or None when the commit fails
    """
    memory_id = str(uuid.uuid4())
    metadata_dict = merge_metadata_dict(chat_message.human_message.metadata, chat_message.ai_message.metadata)
    # Store "{}" when there is no metadata to serialize.
    serialized_metadata = self._serialize_json(metadata_dict) if metadata_dict else "{}"
    record = ChatHistoryModel(
        app_id=app_id,
        id=memory_id,
        session_id=session_id,
        question=chat_message.human_message.content,
        answer=chat_message.ai_message.content,
        metadata=serialized_metadata,
    )
    self.db_session.add(record)
    try:
        self.db_session.commit()
    except Exception as e:
        logger.error(f"Error adding chat memory to db: {e}")
        self.db_session.rollback()
        return None
    logger.info(f"Added chat memory to db with id: {memory_id}")
    return memory_id
def delete(self, app_id: str, session_id: Optional[str] = None):
    """
    Delete all chat history for a given app_id and session_id.
    This is useful for deleting chat history for a given user.

    :param app_id: The app_id to delete chat history for
    :param session_id: The session_id to delete chat history for
    :return: None
    """
    filters = {"app_id": app_id}
    if session_id:
        # Scope the delete to one session when a session_id is supplied.
        filters["session_id"] = session_id
    self.db_session.query(ChatHistoryModel).filter_by(**filters).delete()
    try:
        self.db_session.commit()
    except Exception as e:
        logger.error(f"Error deleting chat history: {e}")
        self.db_session.rollback()
def get(
self, app_id, session_id: str = "default", num_rounds=10, fetch_all: bool = False, display_format=False
) -> list[ChatMessage]:
"""
Get the chat history for a given app_id.
param: app_id - The app_id to get chat history
param: session_id (optional) - The session_id to get chat history. Defaults to "default"
param: num_rounds (optional) - The number of rounds to get chat history. Defaults to 10
param: fetch_all (optional) - Whether to fetch all chat history or not. Defaults to False
param: display_format (optional) - Whether to return the chat history in display format. Defaults to False
"""
params = {"app_id": app_id}
if not fetch_all:
params["session_id"] = session_id
results = (
self.db_session.query(ChatHistoryModel).filter_by(**params).order_by(ChatHistoryModel.created_at.asc())
)
results = results.limit(num_rounds) if not fetch_all else results
history = []
for result in results:
metadata = self._deserialize_json(metadata=result.meta_data or "{}")
# Return list of dict if display_format is True
if display_format:
history.append(
{
"session_id": result.session_id,
"human": result.question,
"ai": result.answer,
"metadata": result.meta_data,
"timestamp": result.created_at,
}
)
else:
memory = ChatMessage()
memory.add_user_message(result.question, metadata=metadata)
memory.add_ai_message(result.answer, metadata=metadata)
history.append(memory)
return history
def count(self, app_id: str, session_id: Optional[str] = None):
"""
Count the number of chat messages for a given app_id and session_id.
:param app_id: The app_id to count chat history for
:param session_id: The session_id to count chat history for
:return: The number of chat messages for a given app_id and session_id
"""
# Rewrite the logic below with sqlalchemy
params = {"app_id": app_id}
if session_id:
params["session_id"] = session_id
return self.db_session.query(ChatHistoryModel).filter_by(**params).count()
@staticmethod
def _serialize_json(metadata: dict[str, Any]):
return json.dumps(metadata)
@staticmethod
def _deserialize_json(metadata: str):
return json.loads(metadata)
def close_connection(self):
self.connection.close()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/utils/cli.py | embedchain/embedchain/utils/cli.py | import os
import re
import shutil
import subprocess
import pkg_resources
from rich.console import Console
console = Console()
def get_pkg_path_from_name(template: str):
try:
# Determine the installation location of the embedchain package
package_path = pkg_resources.resource_filename("embedchain", "")
except ImportError:
console.print("❌ [bold red]Failed to locate the 'embedchain' package. Is it installed?[/bold red]")
return
# Construct the source path from the embedchain package
src_path = os.path.join(package_path, "deployment", template)
if not os.path.exists(src_path):
console.print(f"❌ [bold red]Template '{template}' not found.[/bold red]")
return
return src_path
def setup_fly_io_app(extra_args):
fly_launch_command = ["fly", "launch", "--region", "sjc", "--no-deploy"] + list(extra_args)
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(fly_launch_command)}[/bold cyan]")
shutil.move(".env.example", ".env")
subprocess.run(fly_launch_command, check=True)
console.print("✅ [bold green]'fly launch' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"❌ [bold red]'fly' command not found. Please ensure Fly CLI is installed and in your PATH.[/bold red]"
)
def setup_modal_com_app(extra_args):
modal_setup_file = os.path.join(os.path.expanduser("~"), ".modal.toml")
if os.path.exists(modal_setup_file):
console.print(
"""✅ [bold green]Modal setup already done. You can now install the dependencies by doing \n
`pip install -r requirements.txt`[/bold green]"""
)
else:
modal_setup_cmd = ["modal", "setup"] + list(extra_args)
console.print(f"🚀 [bold cyan]Running: {' '.join(modal_setup_cmd)}[/bold cyan]")
subprocess.run(modal_setup_cmd, check=True)
shutil.move(".env.example", ".env")
console.print(
"""Great! Now you can install the dependencies by doing: \n
`pip install -r requirements.txt`\n
\n
To run your app locally:\n
`ec dev`
"""
)
def setup_render_com_app():
render_setup_file = os.path.join(os.path.expanduser("~"), ".render/config.yaml")
if os.path.exists(render_setup_file):
console.print(
"""✅ [bold green]Render setup already done. You can now install the dependencies by doing \n
`pip install -r requirements.txt`[/bold green]"""
)
else:
render_setup_cmd = ["render", "config", "init"]
console.print(f"🚀 [bold cyan]Running: {' '.join(render_setup_cmd)}[/bold cyan]")
subprocess.run(render_setup_cmd, check=True)
shutil.move(".env.example", ".env")
console.print(
"""Great! Now you can install the dependencies by doing: \n
`pip install -r requirements.txt`\n
\n
To run your app locally:\n
`ec dev`
"""
)
def setup_streamlit_io_app():
# nothing needs to be done here
console.print("Great! Now you can install the dependencies by doing `pip install -r requirements.txt`")
def setup_gradio_app():
# nothing needs to be done here
console.print("Great! Now you can install the dependencies by doing `pip install -r requirements.txt`")
def setup_hf_app():
subprocess.run(["pip", "install", "huggingface_hub[cli]"], check=True)
hf_setup_file = os.path.join(os.path.expanduser("~"), ".cache/huggingface/token")
if os.path.exists(hf_setup_file):
console.print(
"""✅ [bold green]HuggingFace setup already done. You can now install the dependencies by doing \n
`pip install -r requirements.txt`[/bold green]"""
)
else:
console.print(
"""🚀 [cyan]Running: huggingface-cli login \n
Please provide a [bold]WRITE[/bold] token so that we can directly deploy\n
your apps from the terminal.[/cyan]
"""
)
subprocess.run(["huggingface-cli", "login"], check=True)
console.print("Great! Now you can install the dependencies by doing `pip install -r requirements.txt`")
def run_dev_fly_io(debug, host, port):
uvicorn_command = ["uvicorn", "app:app"]
if debug:
uvicorn_command.append("--reload")
uvicorn_command.extend(["--host", host, "--port", str(port)])
try:
console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(uvicorn_command)}[/bold cyan]")
subprocess.run(uvicorn_command, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except KeyboardInterrupt:
console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_modal_com():
modal_run_cmd = ["modal", "serve", "app"]
try:
console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(modal_run_cmd)}[/bold cyan]")
subprocess.run(modal_run_cmd, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except KeyboardInterrupt:
console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_streamlit_io():
streamlit_run_cmd = ["streamlit", "run", "app.py"]
try:
console.print(f"🚀 [bold cyan]Running Streamlit app with command: {' '.join(streamlit_run_cmd)}[/bold cyan]")
subprocess.run(streamlit_run_cmd, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except KeyboardInterrupt:
console.print("\n🛑 [bold yellow]Streamlit server stopped[/bold yellow]")
def run_dev_render_com(debug, host, port):
uvicorn_command = ["uvicorn", "app:app"]
if debug:
uvicorn_command.append("--reload")
uvicorn_command.extend(["--host", host, "--port", str(port)])
try:
console.print(f"🚀 [bold cyan]Running FastAPI app with command: {' '.join(uvicorn_command)}[/bold cyan]")
subprocess.run(uvicorn_command, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except KeyboardInterrupt:
console.print("\n🛑 [bold yellow]FastAPI server stopped[/bold yellow]")
def run_dev_gradio():
gradio_run_cmd = ["gradio", "app.py"]
try:
console.print(f"🚀 [bold cyan]Running Gradio app with command: {' '.join(gradio_run_cmd)}[/bold cyan]")
subprocess.run(gradio_run_cmd, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except KeyboardInterrupt:
console.print("\n🛑 [bold yellow]Gradio server stopped[/bold yellow]")
def read_env_file(env_file_path):
"""
Reads an environment file and returns a dictionary of key-value pairs.
Args:
env_file_path (str): The path to the .env file.
Returns:
dict: Dictionary of environment variables.
"""
env_vars = {}
pattern = re.compile(r"(\w+)=(.*)") # compile regular expression for better performance
with open(env_file_path, "r") as file:
lines = file.readlines() # readlines is faster as it reads all at once
for line in lines:
line = line.strip()
# Ignore comments and empty lines
if line and not line.startswith("#"):
# Assume each line is in the format KEY=VALUE
key_value_match = pattern.match(line)
if key_value_match:
key, value = key_value_match.groups()
env_vars[key] = value
return env_vars
def deploy_fly():
app_name = ""
with open("fly.toml", "r") as file:
for line in file:
if line.strip().startswith("app ="):
app_name = line.split("=")[1].strip().strip('"')
if not app_name:
console.print("❌ [bold red]App name not found in fly.toml[/bold red]")
return
env_vars = read_env_file(".env")
secrets_command = ["flyctl", "secrets", "set", "-a", app_name] + [f"{k}={v}" for k, v in env_vars.items()]
deploy_command = ["fly", "deploy"]
try:
# Set secrets
console.print(f"🔐 [bold cyan]Setting secrets for {app_name}[/bold cyan]")
subprocess.run(secrets_command, check=True)
# Deploy application
console.print(f"🚀 [bold cyan]Running: {' '.join(deploy_command)}[/bold cyan]")
subprocess.run(deploy_command, check=True)
console.print("✅ [bold green]'fly deploy' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"❌ [bold red]'fly' command not found. Please ensure Fly CLI is installed and in your PATH.[/bold red]"
)
def deploy_modal():
modal_deploy_cmd = ["modal", "deploy", "app"]
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(modal_deploy_cmd)}[/bold cyan]")
subprocess.run(modal_deploy_cmd, check=True)
console.print("✅ [bold green]'modal deploy' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"❌ [bold red]'modal' command not found. Please ensure Modal CLI is installed and in your PATH.[/bold red]"
)
def deploy_streamlit():
streamlit_deploy_cmd = ["streamlit", "run", "app.py"]
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(streamlit_deploy_cmd)}[/bold cyan]")
console.print(
"""\n\n✅ [bold yellow]To deploy a streamlit app, you can directly it from the UI.\n
Click on the 'Deploy' button on the top right corner of the app.\n
For more information, please refer to https://docs.embedchain.ai/deployment/streamlit_io
[/bold yellow]
\n\n"""
)
subprocess.run(streamlit_deploy_cmd, check=True)
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"""❌ [bold red]'streamlit' command not found.\n
Please ensure Streamlit CLI is installed and in your PATH.[/bold red]"""
)
def deploy_render():
render_deploy_cmd = ["render", "blueprint", "launch"]
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(render_deploy_cmd)}[/bold cyan]")
subprocess.run(render_deploy_cmd, check=True)
console.print("✅ [bold green]'render blueprint launch' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"❌ [bold red]'render' command not found. Please ensure Render CLI is installed and in your PATH.[/bold red]" # noqa:E501
)
def deploy_gradio_app():
gradio_deploy_cmd = ["gradio", "deploy"]
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(gradio_deploy_cmd)}[/bold cyan]")
subprocess.run(gradio_deploy_cmd, check=True)
console.print("✅ [bold green]'gradio deploy' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
except FileNotFoundError:
console.print(
"❌ [bold red]'gradio' command not found. Please ensure Gradio CLI is installed and in your PATH.[/bold red]" # noqa:E501
)
def deploy_hf_spaces(ec_app_name):
if not ec_app_name:
console.print("❌ [bold red]'name' not found in embedchain.json[/bold red]")
return
hf_spaces_deploy_cmd = ["huggingface-cli", "upload", ec_app_name, ".", ".", "--repo-type=space"]
try:
console.print(f"🚀 [bold cyan]Running: {' '.join(hf_spaces_deploy_cmd)}[/bold cyan]")
subprocess.run(hf_spaces_deploy_cmd, check=True)
console.print("✅ [bold green]'huggingface-cli upload' executed successfully.[/bold green]")
except subprocess.CalledProcessError as e:
console.print(f"❌ [bold red]An error occurred: {e}[/bold red]")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/utils/misc.py | embedchain/embedchain/utils/misc.py | import datetime
import itertools
import json
import logging
import os
import re
import string
from typing import Any
from schema import Optional, Or, Schema
from tqdm import tqdm
from embedchain.models.data_type import DataType
logger = logging.getLogger(__name__)
def parse_content(content, type):
implemented = ["html.parser", "lxml", "lxml-xml", "xml", "html5lib"]
if type not in implemented:
raise ValueError(f"Parser type {type} not implemented. Please choose one of {implemented}")
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, type)
original_size = len(str(soup.get_text()))
tags_to_exclude = [
"nav",
"aside",
"form",
"header",
"noscript",
"svg",
"canvas",
"footer",
"script",
"style",
]
for tag in soup(tags_to_exclude):
tag.decompose()
ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
for id in ids_to_exclude:
tags = soup.find_all(id=id)
for tag in tags:
tag.decompose()
classes_to_exclude = [
"elementor-location-header",
"navbar-header",
"nav",
"header-sidebar-wrapper",
"blog-sidebar-wrapper",
"related-posts",
]
for class_name in classes_to_exclude:
tags = soup.find_all(class_=class_name)
for tag in tags:
tag.decompose()
content = soup.get_text()
content = clean_string(content)
cleaned_size = len(content)
if original_size != 0:
logger.info(
f"Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)" # noqa:E501
)
return content
def clean_string(text):
"""
This function takes in a string and performs a series of text cleaning operations.
Args:
text (str): The text to be cleaned. This is expected to be a string.
Returns:
cleaned_text (str): The cleaned text after all the cleaning operations
have been performed.
"""
# Stripping and reducing multiple spaces to single:
cleaned_text = re.sub(r"\s+", " ", text.strip())
# Removing backslashes:
cleaned_text = cleaned_text.replace("\\", "")
# Replacing hash characters:
cleaned_text = cleaned_text.replace("#", " ")
# Eliminating consecutive non-alphanumeric characters:
# This regex identifies consecutive non-alphanumeric characters (i.e., not
# a word character [a-zA-Z0-9_] and not a whitespace) in the string
# and replaces each group of such characters with a single occurrence of
# that character.
# For example, "!!! hello !!!" would become "! hello !".
cleaned_text = re.sub(r"([^\w\s])\1*", r"\1", cleaned_text)
return cleaned_text
def is_readable(s):
"""
Heuristic to determine if a string is "readable" (mostly contains printable characters and forms meaningful words)
:param s: string
:return: True if the string is more than 95% printable.
"""
len_s = len(s)
if len_s == 0:
return False
printable_chars = set(string.printable)
printable_ratio = sum(c in printable_chars for c in s) / len_s
return printable_ratio > 0.95 # 95% of characters are printable
def use_pysqlite3():
"""
Swap std-lib sqlite3 with pysqlite3.
"""
import platform
import sqlite3
if platform.system() == "Linux" and sqlite3.sqlite_version_info < (3, 35, 0):
try:
# According to the Chroma team, this patch only works on Linux
import datetime
import subprocess
import sys
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "pysqlite3-binary", "--quiet", "--disable-pip-version-check"]
)
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
# Let the user know what happened.
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
print(
f"{current_time} [embedchain] [INFO]",
"Swapped std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
f"Your original version was {sqlite3.sqlite_version}.",
)
except Exception as e:
# Escape all exceptions
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3]
print(
f"{current_time} [embedchain] [ERROR]",
"Failed to swap std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.",
"Error:",
e,
)
def format_source(source: str, limit: int = 20) -> str:
"""
Format a string to only take the first x and last x letters.
This makes it easier to display a URL, keeping familiarity while ensuring a consistent length.
If the string is too short, it is not sliced.
"""
if len(source) > 2 * limit:
return source[:limit] + "..." + source[-limit:]
return source
def detect_datatype(source: Any) -> DataType:
"""
Automatically detect the datatype of the given source.
:param source: the source to base the detection on
:return: data_type string
"""
from urllib.parse import urlparse
import requests
import yaml
def is_openapi_yaml(yaml_content):
# currently the following two fields are required in openapi spec yaml config
return "openapi" in yaml_content and "info" in yaml_content
def is_google_drive_folder(url):
# checks if url is a Google Drive folder url against a regex
regex = r"^drive\.google\.com\/drive\/(?:u\/\d+\/)folders\/([a-zA-Z0-9_-]+)$"
return re.match(regex, url)
try:
if not isinstance(source, str):
raise ValueError("Source is not a string and thus cannot be a URL.")
url = urlparse(source)
# Check if both scheme and netloc are present. Local file system URIs are acceptable too.
if not all([url.scheme, url.netloc]) and url.scheme != "file":
raise ValueError("Not a valid URL.")
except ValueError:
url = False
formatted_source = format_source(str(source), 30)
if url:
YOUTUBE_ALLOWED_NETLOCKS = {
"www.youtube.com",
"m.youtube.com",
"youtu.be",
"youtube.com",
"vid.plus",
"www.youtube-nocookie.com",
}
if url.netloc in YOUTUBE_ALLOWED_NETLOCKS:
logger.debug(f"Source of `{formatted_source}` detected as `youtube_video`.")
return DataType.YOUTUBE_VIDEO
if url.netloc in {"notion.so", "notion.site"}:
logger.debug(f"Source of `{formatted_source}` detected as `notion`.")
return DataType.NOTION
if url.path.endswith(".pdf"):
logger.debug(f"Source of `{formatted_source}` detected as `pdf_file`.")
return DataType.PDF_FILE
if url.path.endswith(".xml"):
logger.debug(f"Source of `{formatted_source}` detected as `sitemap`.")
return DataType.SITEMAP
if url.path.endswith(".csv"):
logger.debug(f"Source of `{formatted_source}` detected as `csv`.")
return DataType.CSV
if url.path.endswith(".mdx") or url.path.endswith(".md"):
logger.debug(f"Source of `{formatted_source}` detected as `mdx`.")
return DataType.MDX
if url.path.endswith(".docx"):
logger.debug(f"Source of `{formatted_source}` detected as `docx`.")
return DataType.DOCX
if url.path.endswith(
(".mp3", ".mp4", ".mp2", ".aac", ".wav", ".flac", ".pcm", ".m4a", ".ogg", ".opus", ".webm")
):
logger.debug(f"Source of `{formatted_source}` detected as `audio`.")
return DataType.AUDIO
if url.path.endswith(".yaml"):
try:
response = requests.get(source)
response.raise_for_status()
try:
yaml_content = yaml.safe_load(response.text)
except yaml.YAMLError as exc:
logger.error(f"Error parsing YAML: {exc}")
raise TypeError(f"Not a valid data type. Error loading YAML: {exc}")
if is_openapi_yaml(yaml_content):
logger.debug(f"Source of `{formatted_source}` detected as `openapi`.")
return DataType.OPENAPI
else:
logger.error(
f"Source of `{formatted_source}` does not contain all the required \
fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
)
raise TypeError(
"Not a valid data type. Check 'https://spec.openapis.org/oas/v3.1.0', \
make sure you have all the required fields in YAML config data"
)
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching URL {formatted_source}: {e}")
if url.path.endswith(".json"):
logger.debug(f"Source of `{formatted_source}` detected as `json_file`.")
return DataType.JSON
if "docs" in url.netloc or ("docs" in url.path and url.scheme != "file"):
# `docs_site` detection via path is not accepted for local filesystem URIs,
# because that would mean all paths that contain `docs` are now doc sites, which is too aggressive.
logger.debug(f"Source of `{formatted_source}` detected as `docs_site`.")
return DataType.DOCS_SITE
if "github.com" in url.netloc:
logger.debug(f"Source of `{formatted_source}` detected as `github`.")
return DataType.GITHUB
if is_google_drive_folder(url.netloc + url.path):
logger.debug(f"Source of `{formatted_source}` detected as `google drive folder`.")
return DataType.GOOGLE_DRIVE_FOLDER
# If none of the above conditions are met, it's a general web page
logger.debug(f"Source of `{formatted_source}` detected as `web_page`.")
return DataType.WEB_PAGE
elif not isinstance(source, str):
# For datatypes where source is not a string.
if isinstance(source, tuple) and len(source) == 2 and isinstance(source[0], str) and isinstance(source[1], str):
logger.debug(f"Source of `{formatted_source}` detected as `qna_pair`.")
return DataType.QNA_PAIR
# Raise an error if it isn't a string and also not a valid non-string type (one of the previous).
# We could stringify it, but it is better to raise an error and let the user decide how they want to do that.
raise TypeError(
"Source is not a string and a valid non-string type could not be detected. If you want to embed it, please stringify it, for instance by using `str(source)` or `(', ').join(source)`." # noqa: E501
)
elif os.path.isfile(source):
# For datatypes that support conventional file references.
# Note: checking for string is not necessary anymore.
if source.endswith(".docx"):
logger.debug(f"Source of `{formatted_source}` detected as `docx`.")
return DataType.DOCX
if source.endswith(".csv"):
logger.debug(f"Source of `{formatted_source}` detected as `csv`.")
return DataType.CSV
if source.endswith(".xml"):
logger.debug(f"Source of `{formatted_source}` detected as `xml`.")
return DataType.XML
if source.endswith(".mdx") or source.endswith(".md"):
logger.debug(f"Source of `{formatted_source}` detected as `mdx`.")
return DataType.MDX
if source.endswith(".txt"):
logger.debug(f"Source of `{formatted_source}` detected as `text`.")
return DataType.TEXT_FILE
if source.endswith(".pdf"):
logger.debug(f"Source of `{formatted_source}` detected as `pdf_file`.")
return DataType.PDF_FILE
if source.endswith(".yaml"):
with open(source, "r") as file:
yaml_content = yaml.safe_load(file)
if is_openapi_yaml(yaml_content):
logger.debug(f"Source of `{formatted_source}` detected as `openapi`.")
return DataType.OPENAPI
else:
logger.error(
f"Source of `{formatted_source}` does not contain all the required \
fields of OpenAPI yaml. Check 'https://spec.openapis.org/oas/v3.1.0'"
)
raise ValueError(
"Invalid YAML data. Check 'https://spec.openapis.org/oas/v3.1.0', \
make sure to add all the required params"
)
if source.endswith(".json"):
logger.debug(f"Source of `{formatted_source}` detected as `json`.")
return DataType.JSON
if os.path.exists(source) and is_readable(open(source).read()):
logger.debug(f"Source of `{formatted_source}` detected as `text_file`.")
return DataType.TEXT_FILE
# If the source is a valid file, that's not detectable as a type, an error is raised.
# It does not fall back to text.
raise ValueError(
"Source points to a valid file, but based on the filename, no `data_type` can be detected. Please be aware, that not all data_types allow conventional file references, some require the use of the `file URI scheme`. Please refer to the embedchain documentation (https://docs.embedchain.ai/advanced/data_types#remote-data-types)." # noqa: E501
)
else:
# Source is not a URL.
# TODO: check if source is gmail query
# check if the source is valid json string
if is_valid_json_string(source):
logger.debug(f"Source of `{formatted_source}` detected as `json`.")
return DataType.JSON
# Use text as final fallback.
logger.debug(f"Source of `{formatted_source}` detected as `text`.")
return DataType.TEXT
# check if the source is valid json string
def is_valid_json_string(source: str):
try:
_ = json.loads(source)
return True
except json.JSONDecodeError:
return False
def validate_config(config_data):
schema = Schema(
{
Optional("app"): {
Optional("config"): {
Optional("id"): str,
Optional("name"): str,
Optional("log_level"): Or("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
Optional("collect_metrics"): bool,
Optional("collection_name"): str,
}
},
Optional("llm"): {
Optional("provider"): Or(
"openai",
"azure_openai",
"anthropic",
"huggingface",
"cohere",
"together",
"gpt4all",
"ollama",
"jina",
"llama2",
"vertexai",
"google",
"aws_bedrock",
"mistralai",
"clarifai",
"vllm",
"groq",
"nvidia",
),
Optional("config"): {
Optional("model"): str,
Optional("model_name"): str,
Optional("number_documents"): int,
Optional("temperature"): float,
Optional("max_tokens"): int,
Optional("top_p"): Or(float, int),
Optional("stream"): bool,
Optional("online"): bool,
Optional("token_usage"): bool,
Optional("template"): str,
Optional("prompt"): str,
Optional("system_prompt"): str,
Optional("deployment_name"): str,
Optional("where"): dict,
Optional("query_type"): str,
Optional("api_key"): str,
Optional("base_url"): str,
Optional("endpoint"): str,
Optional("model_kwargs"): dict,
Optional("local"): bool,
Optional("base_url"): str,
Optional("default_headers"): dict,
Optional("api_version"): Or(str, datetime.date),
Optional("http_client_proxies"): Or(str, dict),
Optional("http_async_client_proxies"): Or(str, dict),
},
},
Optional("vectordb"): {
Optional("provider"): Or(
"chroma", "elasticsearch", "opensearch", "lancedb", "pinecone", "qdrant", "weaviate", "zilliz"
),
Optional("config"): object, # TODO: add particular config schema for each provider
},
Optional("embedder"): {
Optional("provider"): Or(
"openai",
"gpt4all",
"huggingface",
"vertexai",
"azure_openai",
"google",
"mistralai",
"clarifai",
"nvidia",
"ollama",
"cohere",
"aws_bedrock",
),
Optional("config"): {
Optional("model"): Optional(str),
Optional("deployment_name"): Optional(str),
Optional("api_key"): str,
Optional("api_base"): str,
Optional("title"): str,
Optional("task_type"): str,
Optional("vector_dimension"): int,
Optional("base_url"): str,
Optional("endpoint"): str,
Optional("model_kwargs"): dict,
Optional("http_client_proxies"): Or(str, dict),
Optional("http_async_client_proxies"): Or(str, dict),
},
},
Optional("embedding_model"): {
Optional("provider"): Or(
"openai",
"gpt4all",
"huggingface",
"vertexai",
"azure_openai",
"google",
"mistralai",
"clarifai",
"nvidia",
"ollama",
"aws_bedrock",
),
Optional("config"): {
Optional("model"): str,
Optional("deployment_name"): str,
Optional("api_key"): str,
Optional("title"): str,
Optional("task_type"): str,
Optional("vector_dimension"): int,
Optional("base_url"): str,
},
},
Optional("chunker"): {
Optional("chunk_size"): int,
Optional("chunk_overlap"): int,
Optional("length_function"): str,
Optional("min_chunk_size"): int,
},
Optional("cache"): {
Optional("similarity_evaluation"): {
Optional("strategy"): Or("distance", "exact"),
Optional("max_distance"): float,
Optional("positive"): bool,
},
Optional("config"): {
Optional("similarity_threshold"): float,
Optional("auto_flush"): int,
},
},
Optional("memory"): {
Optional("top_k"): int,
},
}
)
return schema.validate(config_data)
def chunks(iterable, batch_size=100, desc="Processing chunks"):
"""A helper function to break an iterable into chunks of size batch_size."""
it = iter(iterable)
total_size = len(iterable)
with tqdm(total=total_size, desc=desc, unit="batch") as pbar:
chunk = tuple(itertools.islice(it, batch_size))
while chunk:
yield chunk
pbar.update(len(chunk))
chunk = tuple(itertools.islice(it, batch_size))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/utils/evaluation.py | embedchain/embedchain/utils/evaluation.py | from enum import Enum
from typing import Optional
from pydantic import BaseModel
class EvalMetric(Enum):
CONTEXT_RELEVANCY = "context_relevancy"
ANSWER_RELEVANCY = "answer_relevancy"
GROUNDEDNESS = "groundedness"
class EvalData(BaseModel):
question: str
contexts: list[str]
answer: str
ground_truth: Optional[str] = None # Not used as of now
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/utils/__init__.py | embedchain/embedchain/utils/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/migrations/env.py | embedchain/embedchain/migrations/env.py | import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from embedchain.core.db.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
config.set_main_option("sqlalchemy.url", os.environ.get("EMBEDCHAIN_DB_URI"))
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/migrations/versions/40a327b3debd_create_initial_migrations.py | embedchain/embedchain/migrations/versions/40a327b3debd_create_initial_migrations.py | """Create initial migrations
Revision ID: 40a327b3debd
Revises:
Create Date: 2024-02-18 15:29:19.409064
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "40a327b3debd"
down_revision: Union[str, None] = None  # first migration: no parent revision
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the initial schema.

    Adds ``ec_chat_history`` (composite PK of app_id/id/session_id, with
    lookup indexes on ``created_at`` and ``session_id``) and
    ``ec_data_sources`` (single-column PK, indexed on ``hash``, ``app_id``
    and ``type``).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "ec_chat_history",
        sa.Column("app_id", sa.String(), nullable=False),
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("session_id", sa.String(), nullable=False),
        sa.Column("question", sa.Text(), nullable=True),
        sa.Column("answer", sa.Text(), nullable=True),
        sa.Column("metadata", sa.Text(), nullable=True),
        sa.Column("created_at", sa.TIMESTAMP(), nullable=True),
        sa.PrimaryKeyConstraint("app_id", "id", "session_id"),
    )
    op.create_index(op.f("ix_ec_chat_history_created_at"), "ec_chat_history", ["created_at"], unique=False)
    op.create_index(op.f("ix_ec_chat_history_session_id"), "ec_chat_history", ["session_id"], unique=False)
    op.create_table(
        "ec_data_sources",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("app_id", sa.Text(), nullable=True),
        sa.Column("hash", sa.Text(), nullable=True),
        sa.Column("type", sa.Text(), nullable=True),
        sa.Column("value", sa.Text(), nullable=True),
        sa.Column("metadata", sa.Text(), nullable=True),
        sa.Column("is_uploaded", sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_ec_data_sources_hash"), "ec_data_sources", ["hash"], unique=False)
    op.create_index(op.f("ix_ec_data_sources_app_id"), "ec_data_sources", ["app_id"], unique=False)
    op.create_index(op.f("ix_ec_data_sources_type"), "ec_data_sources", ["type"], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert the initial schema (indexes are dropped before their tables)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_ec_data_sources_type"), table_name="ec_data_sources")
    op.drop_index(op.f("ix_ec_data_sources_app_id"), table_name="ec_data_sources")
    op.drop_index(op.f("ix_ec_data_sources_hash"), table_name="ec_data_sources")
    op.drop_table("ec_data_sources")
    op.drop_index(op.f("ix_ec_chat_history_session_id"), table_name="ec_chat_history")
    op.drop_index(op.f("ix_ec_chat_history_created_at"), table_name="ec_chat_history")
    op.drop_table("ec_chat_history")
    # ### end Alembic commands ###
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/clarifai.py | embedchain/embedchain/llm/clarifai.py | import logging
import os
from typing import Optional
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class ClarifaiLlm(BaseLlm):
    """LLM backend that answers prompts through a Clarifai-hosted model.

    Requires a Clarifai Personal Access Token, supplied via
    ``config.api_key`` or the ``CLARIFAI_PAT`` environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        if not self.config.api_key and "CLARIFAI_PAT" not in os.environ:
            raise ValueError("Please set the CLARIFAI_PAT environment variable.")

    def get_llm_model_answer(self, prompt):
        """Return the model's answer for *prompt* using this instance's config."""
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> Optional[str]:
        """Query the Clarifai model identified by ``config.model``.

        Returns the raw text of the first prediction output, or ``None``
        if the prediction fails (the error is logged — same best-effort
        contract as before, now explicit).
        """
        try:
            from clarifai.client.model import Model
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for Clarifai are not installed."
                "Please install with `pip install clarifai==10.0.1`"
            ) from None

        model_name = config.model
        logging.info(f"Using clarifai LLM model: {model_name}")
        api_key = config.api_key or os.getenv("CLARIFAI_PAT")
        model = Model(url=model_name, pat=api_key)

        # Fix: the original used a discarded conditional expression with a
        # walrus — `(params := {}) if config.model_kwargs is None else ...` —
        # inside the try block; this is the equivalent, readable form.
        params = config.model_kwargs if config.model_kwargs is not None else {}

        try:
            predict_response = model.predict_by_bytes(
                bytes(prompt, "utf-8"),
                input_type="text",
                inference_params=params,
            )
            return predict_response.outputs[0].data.text.raw
        except Exception as e:
            # Best-effort: log and return None, matching the original's
            # implicit-None behavior on failure.
            logging.error(f"Predict failed, exception: {e}")
            return None
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/anthropic.py | embedchain/embedchain/llm/anthropic.py | import logging
import os
from typing import Any, Optional
try:
from langchain_anthropic import ChatAnthropic
except ImportError:
raise ImportError("Please install the langchain-anthropic package by running `pip install langchain-anthropic`.")
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class AnthropicLlm(BaseLlm):
    """LLM backend for Anthropic chat models via ``langchain_anthropic``.

    Requires an API key from ``config.api_key`` or the
    ``ANTHROPIC_API_KEY`` environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        if not self.config.api_key and "ANTHROPIC_API_KEY" not in os.environ:
            raise ValueError("Please set the ANTHROPIC_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when ``token_usage`` is enabled, also return cost info.

        Raises:
            ValueError: if token accounting is enabled but the model has no
                entry in ``config.model_pricing_map``.
        """
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            # Pricing-map keys are namespaced by provider.
            model_name = "anthropic/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            # Cost = per-token input/output prices times the reported counts.
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["input_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["output_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["input_tokens"],
                "completion_tokens": token_info["output_tokens"],
                "total_tokens": token_info["input_tokens"] + token_info["output_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        # Returns (content, usage dict) when config.token_usage is set,
        # despite the `-> str` annotation.
        api_key = config.api_key or os.getenv("ANTHROPIC_API_KEY")
        chat = ChatAnthropic(anthropic_api_key=api_key, temperature=config.temperature, model_name=config.model)

        # 1000 is treated as the untouched default; any other value is ignored
        # with a warning rather than forwarded.
        if config.max_tokens and config.max_tokens != 1000:
            logger.warning("Config option `max_tokens` is not supported by this model.")

        messages = BaseLlm._get_messages(prompt, system_prompt=config.system_prompt)

        chat_response = chat.invoke(messages)
        if config.token_usage:
            # NOTE(review): assumes the usage dict is exposed under
            # response_metadata["token_usage"] — confirm against the
            # langchain_anthropic version in use.
            return chat_response.content, chat_response.response_metadata["token_usage"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/nvidia.py | embedchain/embedchain/llm/nvidia.py | import os
from collections.abc import Iterable
from typing import Any, Optional, Union
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
try:
from langchain_nvidia_ai_endpoints import ChatNVIDIA
except ImportError:
raise ImportError(
"NVIDIA AI endpoints requires extra dependencies. Install with `pip install langchain-nvidia-ai-endpoints`"
) from None
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class NvidiaLlm(BaseLlm):
    """LLM backend for NVIDIA AI endpoints (``langchain_nvidia_ai_endpoints``).

    Requires an API key from ``config.api_key`` or the ``NVIDIA_API_KEY``
    environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        if not self.config.api_key and "NVIDIA_API_KEY" not in os.environ:
            raise ValueError("Please set the NVIDIA_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when ``token_usage`` is enabled, also return cost info.

        Raises:
            ValueError: if token accounting is enabled but the model has no
                entry in ``config.model_pricing_map``.
        """
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            # Pricing-map keys are namespaced by provider.
            model_name = "nvidia/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["input_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["output_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["input_tokens"],
                "completion_tokens": token_info["output_tokens"],
                "total_tokens": token_info["input_tokens"] + token_info["output_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        # Returns (content, usage dict) when config.token_usage is set.
        callback_manager = [StreamingStdOutCallbackHandler()] if config.stream else [StdOutCallbackHandler()]
        model_kwargs = config.model_kwargs or {}
        # "labels" is an NVIDIA-endpoint-specific option carried in model_kwargs.
        labels = model_kwargs.get("labels", None)
        params = {"model": config.model, "nvidia_api_key": config.api_key or os.getenv("NVIDIA_API_KEY")}
        if config.system_prompt:
            params["system_prompt"] = config.system_prompt
        # Falsy values (0 / None) are deliberately skipped so the endpoint
        # defaults apply.
        if config.temperature:
            params["temperature"] = config.temperature
        if config.top_p:
            params["top_p"] = config.top_p
        if labels:
            params["labels"] = labels
        llm = ChatNVIDIA(**params, callback_manager=CallbackManager(callback_manager))
        # Labels are also forwarded per-invocation when present.
        chat_response = llm.invoke(prompt) if labels is None else llm.invoke(prompt, labels=labels)
        if config.token_usage:
            # NOTE(review): assumes usage lives at response_metadata["token_usage"]
            # — confirm against the installed langchain_nvidia_ai_endpoints.
            return chat_response.content, chat_response.response_metadata["token_usage"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/ollama.py | embedchain/embedchain/llm/ollama.py | import logging
from collections.abc import Iterable
from typing import Optional, Union
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms.ollama import Ollama
try:
from ollama import Client
except ImportError:
raise ImportError("Ollama requires extra dependencies. Install with `pip install ollama`") from None
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class OllamaLlm(BaseLlm):
    """LLM backend backed by a local Ollama server."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        """Create the LLM and ensure the configured model exists locally.

        Defaults to the "llama2" model when none is configured, and pulls
        the model from the Ollama registry if it is not already present.
        """
        super().__init__(config=config)
        if self.config.model is None:
            self.config.model = "llama2"

        # Fix: use self.config (populated with defaults by BaseLlm.__init__)
        # rather than the raw `config` argument, which is None when the
        # caller constructs OllamaLlm() without a config.
        client = Client(host=self.config.base_url)
        local_models = client.list()["models"]
        if not any(model.get("name") == self.config.model for model in local_models):
            logger.info(f"Pulling {self.config.model} from Ollama!")
            client.pull(self.config.model)

    def get_llm_model_answer(self, prompt):
        """Return the model's answer for *prompt* (string, or iterable when streaming)."""
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        """Invoke the Ollama model described by *config* on *prompt*.

        When ``config.stream`` is set, tokens are forwarded to the configured
        callbacks (stdout streaming by default).
        """
        if config.stream:
            callbacks = config.callbacks if config.callbacks else [StreamingStdOutCallbackHandler()]
        else:
            callbacks = [StdOutCallbackHandler()]

        llm = Ollama(
            model=config.model,
            system=config.system_prompt,
            temperature=config.temperature,
            top_p=config.top_p,
            callback_manager=CallbackManager(callbacks),
            base_url=config.base_url,
        )
        return llm.invoke(prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/aws_bedrock.py | embedchain/embedchain/llm/aws_bedrock.py | import os
from typing import Optional
try:
from langchain_aws import BedrockLLM
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for AWSBedrock are not installed." "Please install with `pip install langchain_aws`"
) from None
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class AWSBedrockLlm(BaseLlm):
    """LLM backend that calls Amazon Bedrock text models through boto3."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

    def get_llm_model_answer(self, prompt) -> str:
        """Return the Bedrock model's answer for *prompt*."""
        return self._get_answer(prompt, self.config)

    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> str:
        """Build a Bedrock runtime client and invoke the configured model.

        Defaults to "amazon.titan-text-express-v1" when no model is set;
        streams to stdout when ``config.stream`` is enabled.
        """
        try:
            import boto3
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for AWSBedrock are not installed."
                "Please install with `pip install boto3==1.34.20`."
            ) from None

        # Region resolution: AWS_REGION wins, then AWS_DEFAULT_REGION,
        # finally us-east-1.
        region = os.environ.get("AWS_REGION", os.environ.get("AWS_DEFAULT_REGION", "us-east-1"))
        self.boto_client = boto3.client("bedrock-runtime", region)

        model_kwargs = config.model_kwargs or {
            "temperature": config.temperature,
        }
        llm_kwargs = {
            "model_id": config.model or "amazon.titan-text-express-v1",
            "client": self.boto_client,
            "model_kwargs": model_kwargs,
        }

        if config.stream:
            from langchain.callbacks.streaming_stdout import (
                StreamingStdOutCallbackHandler,
            )

            llm_kwargs["streaming"] = True
            llm_kwargs["callbacks"] = [StreamingStdOutCallbackHandler()]

        return BedrockLLM(**llm_kwargs).invoke(prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/together.py | embedchain/embedchain/llm/together.py | import importlib
import os
from typing import Any, Optional
try:
from langchain_together import ChatTogether
except ImportError:
raise ImportError(
"Please install the langchain_together package by running `pip install langchain_together==0.1.3`."
)
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class TogetherLlm(BaseLlm):
    """LLM backend for Together AI chat models.

    Requires the ``together`` package and an API key from ``config.api_key``
    or the ``TOGETHER_API_KEY`` environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        try:
            importlib.import_module("together")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for Together are not installed."
                'Please install with `pip install --upgrade "embedchain[together]"`'
            ) from None
        super().__init__(config=config)
        if not self.config.api_key and "TOGETHER_API_KEY" not in os.environ:
            raise ValueError("Please set the TOGETHER_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when ``token_usage`` is enabled, also return cost info.

        Raises:
            ValueError: if a system prompt is configured (unsupported), or if
                token accounting is on but the model has no pricing entry.
        """
        if self.config.system_prompt:
            raise ValueError("TogetherLlm does not support `system_prompt`")
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            # Pricing-map keys are namespaced by provider.
            model_name = "together/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            # Cost = per-token input/output prices times the reported counts.
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["prompt_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["completion_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["prompt_tokens"],
                "completion_tokens": token_info["completion_tokens"],
                "total_tokens": token_info["prompt_tokens"] + token_info["completion_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        # Returns (content, usage dict) when config.token_usage is set,
        # despite the `-> str` annotation.
        api_key = config.api_key or os.environ["TOGETHER_API_KEY"]
        kwargs = {
            # NOTE(review): "mixtral-8x7b-32768" looks like a Groq model id
            # copy-pasted from GroqLlm — confirm a valid Together default.
            "model_name": config.model or "mixtral-8x7b-32768",
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            "together_api_key": api_key,
        }
        chat = ChatTogether(**kwargs)
        chat_response = chat.invoke(prompt)
        if config.token_usage:
            return chat_response.content, chat_response.response_metadata["token_usage"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/vllm.py | embedchain/embedchain/llm/vllm.py | from typing import Iterable, Optional, Union
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import VLLM as BaseVLLM
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class VLLM(BaseLlm):
    """LLM backend that runs a model locally through vLLM."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        # Default model when none was configured.
        if self.config.model is None:
            self.config.model = "mosaicml/mpt-7b"

    def get_llm_model_answer(self, prompt):
        """Return the vLLM model's answer for *prompt*."""
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        """Invoke the configured vLLM model on *prompt*.

        Streams tokens to stdout when ``config.stream`` is set; any extra
        ``config.model_kwargs`` are forwarded to the vLLM constructor.
        """
        if config.stream:
            handlers = [StreamingStdOutCallbackHandler()]
        else:
            handlers = [StdOutCallbackHandler()]

        llm_args = {
            "model": config.model,
            "temperature": config.temperature,
            "top_p": config.top_p,
            "callback_manager": CallbackManager(handlers),
        }
        # Forward any extra model kwargs verbatim.
        if config.model_kwargs is not None:
            llm_args.update(config.model_kwargs)

        return BaseVLLM(**llm_args).invoke(prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/groq.py | embedchain/embedchain/llm/groq.py | import os
from typing import Any, Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import HumanMessage, SystemMessage
try:
from langchain_groq import ChatGroq
except ImportError:
raise ImportError("Groq requires extra dependencies. Install with `pip install langchain-groq`") from None
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class GroqLlm(BaseLlm):
    """LLM backend for Groq-hosted models, with optional token accounting.

    Requires an API key from ``config.api_key`` or the ``GROQ_API_KEY``
    environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        if not self.config.api_key and "GROQ_API_KEY" not in os.environ:
            raise ValueError("Please set the GROQ_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when ``token_usage`` is enabled, also return cost info.

        Raises:
            ValueError: if token accounting is enabled but the model has no
                entry in ``config.model_pricing_map``.
        """
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            # Pricing-map keys are namespaced by provider.
            model_name = "groq/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["prompt_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["completion_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["prompt_tokens"],
                "completion_tokens": token_info["completion_tokens"],
                "total_tokens": token_info["prompt_tokens"] + token_info["completion_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> str:
        """Invoke the Groq chat model; streams to stdout when configured.

        Fix: the original built a ``messages`` list (including the optional
        system prompt) but then invoked the model with the bare ``prompt``
        string, silently dropping ``config.system_prompt``. The messages
        list is now actually sent. Also dropped the redundant ``api_key``
        kwarg in the streaming branch — the key is already passed as
        ``groq_api_key``.
        """
        messages = []
        if config.system_prompt:
            messages.append(SystemMessage(content=config.system_prompt))
        messages.append(HumanMessage(content=prompt))

        api_key = config.api_key or os.environ["GROQ_API_KEY"]
        kwargs = {
            "model_name": config.model or "mixtral-8x7b-32768",
            "temperature": config.temperature,
            "groq_api_key": api_key,
        }
        if config.stream:
            callbacks = config.callbacks if config.callbacks else [StreamingStdOutCallbackHandler()]
            chat = ChatGroq(**kwargs, streaming=config.stream, callbacks=callbacks)
        else:
            chat = ChatGroq(**kwargs)

        chat_response = chat.invoke(messages)
        if self.config.token_usage:
            return chat_response.content, chat_response.response_metadata["token_usage"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/google.py | embedchain/embedchain/llm/google.py | import logging
import os
from collections.abc import Generator
from typing import Any, Optional, Union
try:
import google.generativeai as genai
except ImportError:
raise ImportError("GoogleLlm requires extra dependencies. Install with `pip install google-generativeai`") from None
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class GoogleLlm(BaseLlm):
    """LLM backend for Google Gemini models via ``google-generativeai``.

    Requires an API key from ``config.api_key`` or the ``GOOGLE_API_KEY``
    environment variable; the SDK is configured at construction time.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)
        if not self.config.api_key and "GOOGLE_API_KEY" not in os.environ:
            raise ValueError("Please set the GOOGLE_API_KEY environment variable or pass it in the config.")

        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
        genai.configure(api_key=api_key)

    def get_llm_model_answer(self, prompt):
        """Return the Gemini answer for *prompt*.

        Raises:
            ValueError: if a system prompt is configured (unsupported).
        """
        if self.config.system_prompt:
            raise ValueError("GoogleLlm does not support `system_prompt`")
        response = self._get_answer(prompt)
        return response

    def _get_answer(self, prompt: str) -> Union[str, Generator[Any, Any, None]]:
        """Generate content with the configured (or default "gemini-pro") model."""
        model_name = self.config.model or "gemini-pro"
        logger.info(f"Using Google LLM model: {model_name}")
        model = genai.GenerativeModel(model_name=model_name)

        generation_config_params = {
            "candidate_count": 1,
            "max_output_tokens": self.config.max_tokens,
            "temperature": self.config.temperature or 0.5,
        }

        if 0.0 <= self.config.top_p <= 1.0:
            generation_config_params["top_p"] = self.config.top_p
        else:
            # Fix: the message previously claimed an exclusive range
            # ("> 0.0 and < 1.0") while the check above is inclusive.
            raise ValueError("`top_p` must be between 0.0 and 1.0 (inclusive)")

        generation_config = genai.types.GenerationConfig(**generation_config_params)

        response = model.generate_content(
            prompt,
            generation_config=generation_config,
            stream=self.config.stream,
        )

        if self.config.stream:
            # TODO: surface an incremental generator instead of resolving the
            # stream eagerly; today streaming still returns the full text.
            response.resolve()
            return response.text
        else:
            return response.text
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/gpt4all.py | embedchain/embedchain/llm/gpt4all.py | import os
from collections.abc import Iterable
from pathlib import Path
from typing import Optional, Union
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class GPT4ALLLlm(BaseLlm):
    """LLM backend that runs a GPT4All model locally (no API key required)."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        # Default to a small local model when none is configured.
        if self.config.model is None:
            self.config.model = "orca-mini-3b-gguf2-q4_0.gguf"
        self.instance = GPT4ALLLlm._get_instance(self.config.model)
        self.instance.streaming = self.config.stream

    def get_llm_model_answer(self, prompt):
        """Return the local model's answer for *prompt*."""
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_instance(model):
        """Build the LangChain GPT4All wrapper for *model*.

        An absolute path (after ``~`` expansion) must point at an existing
        file; anything else is treated as a model name and downloaded on
        demand.
        """
        try:
            from langchain_community.llms.gpt4all import GPT4All as LangchainGPT4All
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The GPT4All python package is not installed. Please install it with `pip install --upgrade embedchain[opensource]`"  # noqa E501
            ) from None

        model_path = Path(model).expanduser()
        if os.path.isabs(model_path):
            if os.path.exists(model_path):
                return LangchainGPT4All(model=str(model_path))
            else:
                raise ValueError(f"Model does not exist at {model_path=}")
        else:
            # NOTE(review): a *relative* path to an existing local file also
            # lands here and is treated as a downloadable model name — confirm
            # this is intended.
            return LangchainGPT4All(model=model, allow_download=True)

    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
        """Generate an answer with the pre-built model instance.

        Raises:
            RuntimeError: if *config* names a different model than the one
                this instance was constructed with.
        """
        if config.model and config.model != self.config.model:
            raise RuntimeError(
                "GPT4ALLLlm does not support switching models at runtime. Please create a new app instance."
            )

        # NOTE(review): the system prompt is submitted as a *separate prompt*
        # in the prompts list (not a chat system role), and the outputs of all
        # prompts are concatenated below — confirm this is the intended
        # behavior.
        messages = []
        if config.system_prompt:
            messages.append(config.system_prompt)
        messages.append(prompt)
        kwargs = {
            "temp": config.temperature,
            "max_tokens": config.max_tokens,
        }
        if config.top_p:
            kwargs["top_p"] = config.top_p
        callbacks = [StreamingStdOutCallbackHandler()] if config.stream else [StdOutCallbackHandler()]
        response = self.instance.generate(prompts=messages, callbacks=callbacks, **kwargs)
        # Join the generated texts for each prompt into one answer string.
        answer = ""
        for generations in response.generations:
            answer += " ".join(map(lambda generation: generation.text, generations))
        return answer
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/huggingface.py | embedchain/embedchain/llm/huggingface.py | import importlib
import logging
import os
from typing import Optional
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class HuggingFaceLlm(BaseLlm):
    """LLM backend for Hugging Face models.

    Supports three modes, selected in ``_get_answer``: a local pipeline
    (``config.local``), the hosted Hub (``config.model``), or a dedicated
    inference endpoint (``config.endpoint``). Requires an access token from
    ``config.api_key`` or the ``HUGGINGFACE_ACCESS_TOKEN`` environment
    variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        try:
            importlib.import_module("huggingface_hub")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for HuggingFaceHub are not installed."
                "Please install with `pip install huggingface-hub==0.23.0`"
            ) from None
        super().__init__(config=config)
        if not self.config.api_key and "HUGGINGFACE_ACCESS_TOKEN" not in os.environ:
            raise ValueError("Please set the HUGGINGFACE_ACCESS_TOKEN environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt):
        """Return the model's answer for *prompt*.

        Raises:
            ValueError: if a system prompt is configured (unsupported).
        """
        if self.config.system_prompt:
            raise ValueError("HuggingFaceLlm does not support `system_prompt`")
        return HuggingFaceLlm._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        """Dispatch to local pipeline / Hub / endpoint based on *config*."""
        # If the user wants to run the model locally, they can do so by setting the `local` flag to True
        if config.model and config.local:
            return HuggingFaceLlm._from_pipeline(prompt=prompt, config=config)
        elif config.model:
            return HuggingFaceLlm._from_model(prompt=prompt, config=config)
        elif config.endpoint:
            return HuggingFaceLlm._from_endpoint(prompt=prompt, config=config)
        else:
            raise ValueError("Either `model` or `endpoint` must be set in config")

    @staticmethod
    def _from_model(prompt: str, config: BaseLlmConfig) -> str:
        """Run *prompt* against the hosted Hub repo named by ``config.model``."""
        model_kwargs = {
            "temperature": config.temperature or 0.1,
            "max_new_tokens": config.max_tokens,
        }
        # top_p must lie strictly inside (0, 1) for the Hub API.
        if 0.0 < config.top_p < 1.0:
            model_kwargs["top_p"] = config.top_p
        else:
            raise ValueError("`top_p` must be > 0.0 and < 1.0")

        model = config.model
        api_key = config.api_key or os.getenv("HUGGINGFACE_ACCESS_TOKEN")
        logger.info(f"Using HuggingFaceHub with model {model}")
        llm = HuggingFaceHub(
            huggingfacehub_api_token=api_key,
            repo_id=model,
            model_kwargs=model_kwargs,
        )
        return llm.invoke(prompt)

    @staticmethod
    def _from_endpoint(prompt: str, config: BaseLlmConfig) -> str:
        """Run *prompt* against a dedicated inference endpoint URL."""
        api_key = config.api_key or os.getenv("HUGGINGFACE_ACCESS_TOKEN")
        llm = HuggingFaceEndpoint(
            huggingfacehub_api_token=api_key,
            endpoint_url=config.endpoint,
            task="text-generation",
            model_kwargs=config.model_kwargs,
        )
        return llm.invoke(prompt)

    @staticmethod
    def _from_pipeline(prompt: str, config: BaseLlmConfig) -> str:
        """Run *prompt* through a locally-instantiated transformers pipeline."""
        model_kwargs = {
            "temperature": config.temperature or 0.1,
            "max_new_tokens": config.max_tokens,
        }
        # Same strict (0, 1) bound as _from_model.
        if 0.0 < config.top_p < 1.0:
            model_kwargs["top_p"] = config.top_p
        else:
            raise ValueError("`top_p` must be > 0.0 and < 1.0")

        llm = HuggingFacePipeline.from_model_id(
            model_id=config.model,
            task="text-generation",
            pipeline_kwargs=model_kwargs,
        )
        return llm.invoke(prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/jina.py | embedchain/embedchain/llm/jina.py | import os
from typing import Optional
from langchain.schema import HumanMessage, SystemMessage
from langchain_community.chat_models import JinaChat
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class JinaLlm(BaseLlm):
    """LLM backend for JinaChat.

    Requires an API key from ``config.api_key`` or the ``JINACHAT_API_KEY``
    environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)
        if not self.config.api_key and "JINACHAT_API_KEY" not in os.environ:
            raise ValueError("Please set the JINACHAT_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt):
        """Return JinaChat's answer for *prompt*."""
        return JinaLlm._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        """Send the (optional system prompt +) user prompt to JinaChat."""
        messages = []
        if config.system_prompt:
            messages.append(SystemMessage(content=config.system_prompt))
        messages.append(HumanMessage(content=prompt))

        chat_kwargs = {
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            "jinachat_api_key": config.api_key or os.environ["JINACHAT_API_KEY"],
            "model_kwargs": {},
        }
        if config.top_p:
            chat_kwargs["model_kwargs"]["top_p"] = config.top_p

        if config.stream:
            from langchain.callbacks.streaming_stdout import (
                StreamingStdOutCallbackHandler,
            )

            chat = JinaChat(**chat_kwargs, streaming=config.stream, callbacks=[StreamingStdOutCallbackHandler()])
        else:
            chat = JinaChat(**chat_kwargs)
        return chat(messages).content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/cohere.py | embedchain/embedchain/llm/cohere.py | import importlib
import os
from typing import Any, Optional
from langchain_cohere import ChatCohere
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class CohereLlm(BaseLlm):
    """LLM backend for Cohere chat models via ``langchain_cohere``.

    Requires the ``cohere`` package and an API key from ``config.api_key``
    or the ``COHERE_API_KEY`` environment variable.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        try:
            importlib.import_module("cohere")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for Cohere are not installed."
                "Please install with `pip install langchain_cohere==1.16.0`"
            ) from None
        super().__init__(config=config)
        if not self.config.api_key and "COHERE_API_KEY" not in os.environ:
            raise ValueError("Please set the COHERE_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when ``token_usage`` is enabled, also return cost info.

        Raises:
            ValueError: if a system prompt is configured (unsupported), or if
                token accounting is on but the model has no pricing entry.
        """
        if self.config.system_prompt:
            raise ValueError("CohereLlm does not support `system_prompt`")
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            # Pricing-map keys are namespaced by provider.
            model_name = "cohere/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["input_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["output_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["input_tokens"],
                "completion_tokens": token_info["output_tokens"],
                "total_tokens": token_info["input_tokens"] + token_info["output_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        """Invoke the Cohere chat model (default "command-r") on *prompt*."""
        api_key = config.api_key or os.environ["COHERE_API_KEY"]
        kwargs = {
            "model_name": config.model or "command-r",
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            # Fix: this was "together_api_key" (copy-paste from TogetherLlm),
            # so a key supplied via config was never forwarded to ChatCohere.
            "cohere_api_key": api_key,
        }
        chat = ChatCohere(**kwargs)
        chat_response = chat.invoke(prompt)
        if config.token_usage:
            return chat_response.content, chat_response.response_metadata["token_count"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/__init__.py | embedchain/embedchain/llm/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/azure_openai.py | embedchain/embedchain/llm/azure_openai.py | import logging
from typing import Optional
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class AzureOpenAILlm(BaseLlm):
    """LLM adapter for Azure-hosted OpenAI chat deployments."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config=config)

    def get_llm_model_answer(self, prompt):
        """Return the chat completion for *prompt* using this instance's config."""
        return self._get_answer(prompt=prompt, config=self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        """Build an AzureChatOpenAI client from *config* and run the prompt through it.

        :raises ValueError: if no Azure deployment name is configured.
        """
        from langchain_openai import AzureChatOpenAI

        if not config.deployment_name:
            raise ValueError("Deployment name must be provided for Azure OpenAI")

        llm_kwargs = {
            "deployment_name": config.deployment_name,
            "openai_api_version": str(config.api_version) if config.api_version else "2024-02-01",
            "model_name": config.model or "gpt-4o-mini",
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            "streaming": config.stream,
            "http_client": config.http_client,
            "http_async_client": config.http_async_client,
        }
        chat = AzureChatOpenAI(**llm_kwargs)

        # `top_p` has no effect for this integration; warn instead of silently dropping it.
        if config.top_p and config.top_p != 1:
            logger.warning("Config option `top_p` is not supported by this model.")

        messages = BaseLlm._get_messages(prompt, system_prompt=config.system_prompt)
        return chat.invoke(messages).content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/base.py | embedchain/embedchain/llm/base.py | import logging
import os
from collections.abc import Generator
from typing import Any, Optional
from langchain.schema import BaseMessage as LCBaseMessage
from embedchain.config import BaseLlmConfig
from embedchain.config.llm.base import (
DEFAULT_PROMPT,
DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE,
DEFAULT_PROMPT_WITH_MEM0_MEMORY_TEMPLATE,
DOCS_SITE_PROMPT_TEMPLATE,
)
from embedchain.constants import SQLITE_PATH
from embedchain.core.db.database import init_db, setup_engine
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.memory.base import ChatHistory
from embedchain.memory.message import ChatMessage
logger = logging.getLogger(__name__)
class BaseLlm(JSONSerializable):
    """Common behaviour shared by all LLM integrations.

    Owns prompt construction, chat-history persistence, optional web-search
    augmentation, and the `query`/`chat` entry points. Concrete providers
    implement :meth:`get_llm_model_answer`.
    """

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        """Initialize a base LLM class
        :param config: LLM configuration option class, defaults to None
        :type config: Optional[BaseLlmConfig], optional
        """
        if config is None:
            self.config = BaseLlmConfig()
        else:
            self.config = config
        # Initialize the metadata db for the app here since llmfactory needs it for initialization of
        # the llm memory
        setup_engine(database_uri=os.environ.get("EMBEDCHAIN_DB_URI", f"sqlite:///{SQLITE_PATH}"))
        init_db()
        self.memory = ChatHistory()
        # Flag flipped externally; when True, query/chat switch to the docs-site prompt.
        self.is_docs_site_instance = False
        self.history: Any = None

    def get_llm_model_answer(self):
        """
        Usually implemented by child class
        """
        raise NotImplementedError

    def set_history(self, history: Any):
        """
        Provide your own history.
        Especially interesting for the query method, which does not internally manage conversation history.

        :param history: History to set
        :type history: Any
        """
        self.history = history

    def update_history(self, app_id: str, session_id: str = "default"):
        """Update class history attribute with history in memory (for chat method)"""
        chat_history = self.memory.get(app_id=app_id, session_id=session_id, num_rounds=10)
        self.set_history([str(history) for history in chat_history])

    def add_history(
        self,
        app_id: str,
        question: str,
        answer: str,
        metadata: Optional[dict[str, Any]] = None,
        session_id: str = "default",
    ):
        """Persist one question/answer round and refresh the in-memory history."""
        chat_message = ChatMessage()
        chat_message.add_user_message(question, metadata=metadata)
        chat_message.add_ai_message(answer, metadata=metadata)
        self.memory.add(app_id=app_id, chat_message=chat_message, session_id=session_id)
        self.update_history(app_id=app_id, session_id=session_id)

    def _format_history(self) -> str:
        """Format history to be used in prompt

        :return: Formatted history
        :rtype: str
        """
        return "\n".join(self.history)

    def _format_memories(self, memories: list[dict]) -> str:
        """Format memories to be used in prompt

        :param memories: Memories to format
        :type memories: list[dict]
        :return: Formatted memories
        :rtype: str
        """
        return "\n".join([memory["text"] for memory in memories])

    def generate_prompt(self, input_query: str, contexts: list[str], **kwargs: dict[str, Any]) -> str:
        """
        Generates a prompt based on the given query and context, ready to be
        passed to an LLM

        :param input_query: The query to use.
        :type input_query: str
        :param contexts: List of similar documents to the query used as context.
        :type contexts: list[str]
        :return: The prompt
        :rtype: str
        """
        context_string = " | ".join(contexts)
        web_search_result = kwargs.get("web_search_result", "")
        memories = kwargs.get("memories", None)
        if web_search_result:
            context_string = self._append_search_and_context(context_string, web_search_result)

        prompt_contains_history = self.config._validate_prompt_history(self.config.prompt)
        if prompt_contains_history:
            # Prompt template already has a `$history` placeholder; fill it directly.
            prompt = self.config.prompt.substitute(
                context=context_string, query=input_query, history=self._format_history() or "No history"
            )
        elif self.history and not prompt_contains_history:
            # History is present, but not included in the prompt.
            # check if it's the default prompt without history
            if (
                not self.config._validate_prompt_history(self.config.prompt)
                and self.config.prompt.template == DEFAULT_PROMPT
            ):
                if memories:
                    # swap in the template with Mem0 memory template
                    prompt = DEFAULT_PROMPT_WITH_MEM0_MEMORY_TEMPLATE.substitute(
                        context=context_string,
                        query=input_query,
                        history=self._format_history(),
                        memories=self._format_memories(memories),
                    )
                else:
                    # swap in the template with history
                    prompt = DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE.substitute(
                        context=context_string, query=input_query, history=self._format_history()
                    )
            else:
                # If we can't swap in the default, we still proceed but tell users that the history is ignored.
                logger.warning(
                    "Your bot contains a history, but prompt does not include `$history` key. History is ignored."
                )
                prompt = self.config.prompt.substitute(context=context_string, query=input_query)
        else:
            # basic use case, no history.
            prompt = self.config.prompt.substitute(context=context_string, query=input_query)
        return prompt

    @staticmethod
    def _append_search_and_context(context: str, web_search_result: str) -> str:
        """Append web search context to existing context

        :param context: Existing context
        :type context: str
        :param web_search_result: Web search result
        :type web_search_result: str
        :return: Concatenated web search result
        :rtype: str
        """
        return f"{context}\nWeb Search Result: {web_search_result}"

    def get_answer_from_llm(self, prompt: str):
        """
        Gets an answer based on the given query and context by passing it
        to an LLM.

        :param prompt: Gets an answer based on the given query and context by passing it to an LLM.
        :type prompt: str
        :return: The answer.
        :rtype: _type_
        """
        return self.get_llm_model_answer(prompt)

    @staticmethod
    def access_search_and_get_results(input_query: str):
        """
        Search the internet for additional context

        :param input_query: search query
        :type input_query: str
        :return: Search results
        :rtype: Unknown
        """
        try:
            from langchain.tools import DuckDuckGoSearchRun
        except ImportError:
            raise ImportError(
                "Searching requires extra dependencies. Install with `pip install duckduckgo-search==6.1.5`"
            ) from None
        search = DuckDuckGoSearchRun()
        logger.info(f"Access search to get answers for {input_query}")
        return search.run(input_query)

    @staticmethod
    def _stream_response(answer: Any, token_info: Optional[dict[str, Any]] = None) -> Generator[Any, Any, None]:
        """Generator to be used as streaming response

        :param answer: Answer chunk from llm
        :type answer: Any
        :yield: Answer chunk from llm
        :rtype: Generator[Any, Any, None]
        """
        streamed_answer = ""
        for chunk in answer:
            streamed_answer = streamed_answer + chunk
            yield chunk
        # Full answer (and token info, when provided) is only logged after the stream ends.
        logger.info(f"Answer: {streamed_answer}")
        if token_info:
            logger.info(f"Token Info: {token_info}")

    def query(self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, memories=None):
        """
        Queries the vector database based on the given input query.
        Gets relevant doc based on the query and then passes it to an
        LLM as context to get the answer.

        :param input_query: The query to use.
        :type input_query: str
        :param contexts: Embeddings retrieved from the database to be used as context.
        :type contexts: list[str]
        :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
        To persistently use a config, declare it during app init., defaults to None
        :type config: Optional[BaseLlmConfig], optional
        :param dry_run: A dry run does everything except send the resulting prompt to
        the LLM. The purpose is to test the prompt, not the response., defaults to False
        :type dry_run: bool, optional
        :return: The answer to the query or the dry run result
        :rtype: str
        """
        try:
            if config:
                # A config instance passed to this method will only be applied temporarily, for one call.
                # So we will save the previous config and restore it at the end of the execution.
                # For this we use the serializer.
                prev_config = self.config.serialize()
                self.config = config

            if config is not None and config.query_type == "Images":
                return contexts

            if self.is_docs_site_instance:
                self.config.prompt = DOCS_SITE_PROMPT_TEMPLATE
                self.config.number_documents = 5
            k = {}
            if self.config.online:
                k["web_search_result"] = self.access_search_and_get_results(input_query)
            k["memories"] = memories
            prompt = self.generate_prompt(input_query, contexts, **k)
            logger.info(f"Prompt: {prompt}")
            if dry_run:
                return prompt

            # `token_info` is only bound when `token_usage` is enabled; every use
            # below is guarded by the same flag.
            if self.config.token_usage:
                answer, token_info = self.get_answer_from_llm(prompt)
            else:
                answer = self.get_answer_from_llm(prompt)
            if isinstance(answer, str):
                logger.info(f"Answer: {answer}")
                if self.config.token_usage:
                    return answer, token_info
                return answer
            else:
                # Non-string answer is treated as a streaming generator.
                if self.config.token_usage:
                    return self._stream_response(answer, token_info)
                return self._stream_response(answer)
        finally:
            if config:
                # Restore previous config
                self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)

    def chat(
        self, input_query: str, contexts: list[str], config: BaseLlmConfig = None, dry_run=False, session_id: str = None
    ):
        """
        Queries the vector database on the given input query.
        Gets relevant doc based on the query and then passes it to an
        LLM as context to get the answer.

        Maintains the whole conversation in memory.

        :param input_query: The query to use.
        :type input_query: str
        :param contexts: Embeddings retrieved from the database to be used as context.
        :type contexts: list[str]
        :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
        To persistently use a config, declare it during app init., defaults to None
        :type config: Optional[BaseLlmConfig], optional
        :param dry_run: A dry run does everything except send the resulting prompt to
        the LLM. The purpose is to test the prompt, not the response., defaults to False
        :type dry_run: bool, optional
        :param session_id: Session ID to use for the conversation, defaults to None
        :type session_id: str, optional
        :return: The answer to the query or the dry run result
        :rtype: str
        """
        try:
            if config:
                # A config instance passed to this method will only be applied temporarily, for one call.
                # So we will save the previous config and restore it at the end of the execution.
                # For this we use the serializer.
                prev_config = self.config.serialize()
                self.config = config

            if self.is_docs_site_instance:
                self.config.prompt = DOCS_SITE_PROMPT_TEMPLATE
                self.config.number_documents = 5
            k = {}
            if self.config.online:
                k["web_search_result"] = self.access_search_and_get_results(input_query)

            prompt = self.generate_prompt(input_query, contexts, **k)
            logger.info(f"Prompt: {prompt}")

            if dry_run:
                return prompt

            # NOTE(review): unlike query(), this always unpacks an (answer, token_info)
            # tuple, i.e. it presumes the provider returns token info regardless of
            # `config.token_usage` — confirm all providers used with chat() do so.
            answer, token_info = self.get_answer_from_llm(prompt)
            if isinstance(answer, str):
                logger.info(f"Answer: {answer}")
                return answer, token_info
            else:
                # this is a streamed response and needs to be handled differently.
                return self._stream_response(answer, token_info)
        finally:
            if config:
                # Restore previous config
                self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)

    @staticmethod
    def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> list[LCBaseMessage]:
        """
        Construct a list of langchain messages

        :param prompt: User prompt
        :type prompt: str
        :param system_prompt: System prompt, defaults to None
        :type system_prompt: Optional[str], optional
        :return: List of messages
        :rtype: list[BaseMessage]
        """
        from langchain.schema import HumanMessage, SystemMessage

        messages = []
        if system_prompt:
            messages.append(SystemMessage(content=system_prompt))
        messages.append(HumanMessage(content=prompt))
        return messages
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/mistralai.py | embedchain/embedchain/llm/mistralai.py | import os
from typing import Any, Optional
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class MistralAILlm(BaseLlm):
    """LLM adapter for Mistral AI chat models (via ``langchain_mistralai``)."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)
        # Fail fast when no API key is available from config or environment.
        if not self.config.api_key and "MISTRAL_API_KEY" not in os.environ:
            raise ValueError("Please set the MISTRAL_API_KEY environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when `token_usage` is enabled also return usage and cost info."""
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            model_name = "mistralai/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            # Cost = per-token input price * prompt tokens + per-token output price * completion tokens.
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["prompt_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["completion_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["prompt_tokens"],
                "completion_tokens": token_info["completion_tokens"],
                "total_tokens": token_info["prompt_tokens"] + token_info["completion_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig):
        """Call ChatMistralAI; returns text, or (text, token_usage dict) when `token_usage` is set."""
        try:
            from langchain_core.messages import HumanMessage, SystemMessage
            from langchain_mistralai.chat_models import ChatMistralAI
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for MistralAI are not installed."
                'Please install with `pip install --upgrade "embedchain[mistralai]"`'
            ) from None
        api_key = config.api_key or os.getenv("MISTRAL_API_KEY")
        client = ChatMistralAI(mistral_api_key=api_key)
        messages = []
        if config.system_prompt:
            messages.append(SystemMessage(content=config.system_prompt))
        messages.append(HumanMessage(content=prompt))
        kwargs = {
            "model": config.model or "mistral-tiny",
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            "top_p": config.top_p,
        }
        # TODO: Add support for streaming
        if config.stream:
            # NOTE(review): the streaming path returns the accumulated plain string even
            # when `token_usage` is enabled — confirm callers expecting a tuple handle this.
            answer = ""
            for chunk in client.stream(**kwargs, input=messages):
                answer += chunk.content
            return answer
        else:
            chat_response = client.invoke(**kwargs, input=messages)
            if config.token_usage:
                return chat_response.content, chat_response.response_metadata["token_usage"]
            return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/vertex_ai.py | embedchain/embedchain/llm/vertex_ai.py | import importlib
import logging
from typing import Any, Optional
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_google_vertexai import ChatVertexAI
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
logger = logging.getLogger(__name__)
@register_deserializable
class VertexAILlm(BaseLlm):
    """LLM adapter for Google Vertex AI chat models (via ``langchain_google_vertexai``)."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        """:raises ModuleNotFoundError: if the `vertexai` package is not installed."""
        try:
            importlib.import_module("vertexai")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for VertexAI are not installed."
                'Please install with `pip install --upgrade "embedchain[vertexai]"`'
            ) from None
        super().__init__(config=config)

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when `token_usage` is enabled also return usage and cost info."""
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            model_name = "vertexai/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            # Cost = input price * prompt tokens + output price * candidate (completion) tokens.
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["prompt_token_count"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info[
                "candidates_token_count"
            ]
            response_token_info = {
                "prompt_tokens": token_info["prompt_token_count"],
                "completion_tokens": token_info["candidates_token_count"],
                "total_tokens": token_info["prompt_token_count"] + token_info["candidates_token_count"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    @staticmethod
    def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
        """Call ChatVertexAI; returns text, or (text, usage_metadata) when `token_usage` is set."""
        # `top_p` is not forwarded to this client; warn rather than silently drop it.
        if config.top_p and config.top_p != 1:
            logger.warning("Config option `top_p` is not supported by this model.")

        if config.stream:
            callbacks = config.callbacks if config.callbacks else [StreamingStdOutCallbackHandler()]
            llm = ChatVertexAI(
                temperature=config.temperature, model=config.model, callbacks=callbacks, streaming=config.stream
            )
        else:
            llm = ChatVertexAI(temperature=config.temperature, model=config.model)
        messages = VertexAILlm._get_messages(prompt)
        chat_response = llm.invoke(messages)
        if config.token_usage:
            return chat_response.content, chat_response.response_metadata["usage_metadata"]
        return chat_response.content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/openai.py | embedchain/embedchain/llm/openai.py | import json
import os
import warnings
from typing import Any, Callable, Dict, Optional, Type, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class OpenAILlm(BaseLlm):
    """LLM adapter for OpenAI chat models, with optional tool (function) calling."""

    def __init__(
        self,
        config: Optional[BaseLlmConfig] = None,
        tools: Optional[Union[Dict[str, Any], Type[BaseModel], Callable[..., Any], BaseTool]] = None,
    ):
        # When `tools` is set, answers are produced via tool-calling (see _query_function_call).
        self.tools = tools
        super().__init__(config=config)

    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
        """Answer *prompt*; when `token_usage` is enabled also return usage and cost info."""
        if self.config.token_usage:
            response, token_info = self._get_answer(prompt, self.config)
            model_name = "openai/" + self.config.model
            if model_name not in self.config.model_pricing_map:
                raise ValueError(
                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
                    You can disable token usage by setting `token_usage` to False."
                )
            # Cost = per-token input price * prompt tokens + per-token output price * completion tokens.
            total_cost = (
                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["prompt_tokens"]
            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["completion_tokens"]
            response_token_info = {
                "prompt_tokens": token_info["prompt_tokens"],
                "completion_tokens": token_info["completion_tokens"],
                "total_tokens": token_info["prompt_tokens"] + token_info["completion_tokens"],
                "total_cost": round(total_cost, 10),
                "cost_currency": "USD",
            }
            return response, response_token_info
        return self._get_answer(prompt, self.config)

    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> str:
        """Build a ChatOpenAI client from *config* and run the prompt (or tool call) through it."""
        messages = []
        if config.system_prompt:
            messages.append(SystemMessage(content=config.system_prompt))
        messages.append(HumanMessage(content=prompt))
        kwargs = {
            "model": config.model or "gpt-4o-mini",
            "temperature": config.temperature,
            "max_tokens": config.max_tokens,
            "model_kwargs": config.model_kwargs or {},
        }
        api_key = config.api_key or os.environ["OPENAI_API_KEY"]
        # Resolution order: explicit config, legacy env var, current env var, public default.
        base_url = (
            config.base_url
            or os.getenv("OPENAI_API_BASE")
            or os.getenv("OPENAI_BASE_URL")
            or "https://api.openai.com/v1"
        )
        if os.environ.get("OPENAI_API_BASE"):
            warnings.warn(
                "The environment variable 'OPENAI_API_BASE' is deprecated and will be removed in the 0.1.140. "
                "Please use 'OPENAI_BASE_URL' instead.",
                DeprecationWarning
            )
        if config.top_p:
            kwargs["top_p"] = config.top_p
        if config.default_headers:
            kwargs["default_headers"] = config.default_headers
        if config.stream:
            callbacks = config.callbacks if config.callbacks else [StreamingStdOutCallbackHandler()]
            chat = ChatOpenAI(
                **kwargs,
                streaming=config.stream,
                callbacks=callbacks,
                api_key=api_key,
                base_url=base_url,
                http_client=config.http_client,
                http_async_client=config.http_async_client,
            )
        else:
            chat = ChatOpenAI(
                **kwargs,
                api_key=api_key,
                base_url=base_url,
                http_client=config.http_client,
                http_async_client=config.http_async_client,
            )
        if self.tools:
            return self._query_function_call(chat, self.tools, messages)

        chat_response = chat.invoke(messages)
        if self.config.token_usage:
            return chat_response.content, chat_response.response_metadata["token_usage"]
        return chat_response.content

    def _query_function_call(
        self,
        chat: ChatOpenAI,
        tools: Optional[Union[Dict[str, Any], Type[BaseModel], Callable[..., Any], BaseTool]],
        messages: list[BaseMessage],
    ) -> str:
        """Run the prompt in tool-calling mode and return the first tool call as a JSON string."""
        from langchain.output_parsers.openai_tools import JsonOutputToolsParser
        from langchain_core.utils.function_calling import convert_to_openai_tool

        openai_tools = [convert_to_openai_tool(tools)]
        chat = chat.bind(tools=openai_tools).pipe(JsonOutputToolsParser())
        try:
            return json.dumps(chat.invoke(messages)[0])
        except IndexError:
            # The model produced no tool call for this input.
            return "Input could not be mapped to the function!"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/llm/llama2.py | embedchain/embedchain/llm/llama2.py | import importlib
import os
from typing import Optional
from langchain_community.llms.replicate import Replicate
from embedchain.config import BaseLlmConfig
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.llm.base import BaseLlm
@register_deserializable
class Llama2Llm(BaseLlm):
    """LLM adapter for Llama 2 chat models served through Replicate."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        try:
            importlib.import_module("replicate")
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for Llama2 are not installed."
                'Please install with `pip install --upgrade "embedchain[llama2]"`'
            ) from None

        # No user-supplied config: start from defaults tuned for this model.
        if not config:
            config = BaseLlmConfig()
            config.max_tokens = 500
            config.temperature = 0.75
        # Fill in the Replicate model slug whenever one was not configured.
        if not config.model:
            config.model = (
                "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
            )

        super().__init__(config=config)
        if not self.config.api_key and "REPLICATE_API_TOKEN" not in os.environ:
            raise ValueError("Please set the REPLICATE_API_TOKEN environment variable or pass it in the config.")

    def get_llm_model_answer(self, prompt):
        """Run *prompt* through the configured Replicate-hosted Llama 2 model."""
        # TODO: Move the model and other inputs into config
        if self.config.system_prompt:
            raise ValueError("Llama2 does not support `system_prompt`")
        token = self.config.api_key or os.getenv("REPLICATE_API_TOKEN")
        client = Replicate(
            model=self.config.model,
            replicate_api_token=token,
            input={
                "temperature": self.config.temperature,
                "max_length": self.config.max_tokens,
                "top_p": self.config.top_p,
            },
        )
        return client.invoke(prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/core/__init__.py | embedchain/embedchain/core/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/core/db/models.py | embedchain/embedchain/core/db/models.py | import uuid
from sqlalchemy import TIMESTAMP, Column, Integer, String, Text, func
from sqlalchemy.orm import declarative_base
# Shared declarative base for all embedchain ORM models.
Base = declarative_base()
# Re-exported so callers (e.g. Alembic) can reference the schema metadata directly.
metadata = Base.metadata
class DataSource(Base):
    """ORM model for an ingested data source; rows are keyed by a generated UUID string."""

    __tablename__ = "ec_data_sources"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    # Indexed lookup columns; presumably `hash` identifies the source content — TODO confirm.
    app_id = Column(Text, index=True)
    hash = Column(Text, index=True)
    type = Column(Text, index=True)
    value = Column(Text)
    # Python attribute `meta_data` maps to the DB column named "metadata"
    # (the bare name would clash with SQLAlchemy's `Base.metadata`).
    meta_data = Column(Text, name="metadata")
    is_uploaded = Column(Integer, default=0)
class ChatHistory(Base):
    """ORM model for one Q/A round, with a composite key (app_id, id, session_id)."""

    __tablename__ = "ec_chat_history"

    app_id = Column(String, primary_key=True)
    id = Column(String, primary_key=True)
    session_id = Column(String, primary_key=True, index=True)
    question = Column(Text)
    answer = Column(Text)
    # Python attribute `meta_data` maps to the DB column named "metadata"
    # (the bare name would clash with SQLAlchemy's `Base.metadata`).
    meta_data = Column(Text, name="metadata")
    created_at = Column(TIMESTAMP, default=func.current_timestamp(), index=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/core/db/database.py | embedchain/embedchain/core/db/database.py | import os
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session as SQLAlchemySession
from sqlalchemy.orm import scoped_session, sessionmaker
from .models import Base
class DatabaseManager:
    """Owns the SQLAlchemy engine and a scoped session factory for the metadata DB."""

    def __init__(self, echo: bool = False):
        # The URI may also be injected later via the module-level setup_engine() helper.
        self.database_uri = os.environ.get("EMBEDCHAIN_DB_URI")
        self.echo = echo
        self.engine: Engine = None
        self._session_factory = None

    def setup_engine(self) -> None:
        """Initializes the database engine and session factory."""
        if not self.database_uri:
            raise RuntimeError("Database URI is not set. Set the EMBEDCHAIN_DB_URI environment variable.")

        connect_args = {}
        if self.database_uri.startswith("sqlite"):
            # Allow the SQLite connection to be used across threads.
            connect_args["check_same_thread"] = False

        self.engine = create_engine(self.database_uri, echo=self.echo, connect_args=connect_args)
        self._session_factory = scoped_session(sessionmaker(bind=self.engine))
        # NOTE(review): `Base.metadata.bind` is legacy-style binding and is a no-op
        # on SQLAlchemy 2.x — confirm whether anything still relies on it.
        Base.metadata.bind = self.engine

    def init_db(self) -> None:
        """Creates all tables defined in the Base metadata."""
        if not self.engine:
            raise RuntimeError("Database engine is not initialized. Call setup_engine() first.")
        Base.metadata.create_all(self.engine)

    def get_session(self) -> SQLAlchemySession:
        """Provides a session for database operations."""
        if not self._session_factory:
            raise RuntimeError("Session factory is not initialized. Call setup_engine() first.")
        return self._session_factory()

    def close_session(self) -> None:
        """Closes the current session."""
        if self._session_factory:
            self._session_factory.remove()

    def execute_transaction(self, transaction_block):
        """Executes a block of code within a database transaction.

        Commits on success, rolls back on any exception (which is re-raised),
        and always releases the scoped session afterwards.
        """
        session = self.get_session()
        try:
            transaction_block(session)
            session.commit()
        except Exception as e:
            session.rollback()
            raise e
        finally:
            self.close_session()
# Module-level singleton: one shared DatabaseManager for the whole application.
database_manager = DatabaseManager()
# Convenience functions for backward compatibility and ease of use
def setup_engine(database_uri: str, echo: bool = False) -> None:
    """Configure the shared manager with *database_uri* and (re)build its engine."""
    database_manager.database_uri = database_uri
    database_manager.echo = echo
    database_manager.setup_engine()
def alembic_upgrade() -> None:
    """Upgrades the database to the latest version."""
    # alembic.ini is resolved relative to this file, two levels up in the package.
    alembic_config_path = os.path.join(os.path.dirname(__file__), "..", "..", "alembic.ini")
    alembic_cfg = Config(alembic_config_path)
    command.upgrade(alembic_cfg, "head")
def init_db() -> None:
    """Bring the schema up to date by running Alembic migrations to `head`."""
    alembic_upgrade()
def get_session() -> SQLAlchemySession:
    """Return a session from the shared manager (requires setup_engine() first)."""
    return database_manager.get_session()
def execute_transaction(transaction_block):
    """Run *transaction_block* inside a commit/rollback transaction on the shared manager."""
    database_manager.execute_transaction(transaction_block)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/core/db/__init__.py | embedchain/embedchain/core/db/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/app_config.py | embedchain/embedchain/config/app_config.py | from typing import Optional
from embedchain.helpers.json_serializable import register_deserializable
from .base_app_config import BaseAppConfig
@register_deserializable
class AppConfig(BaseAppConfig):
    """
    Config to initialize an embedchain custom `App` instance, with extra config options.
    """

    def __init__(
        self,
        log_level: str = "WARNING",
        id: Optional[str] = None,
        name: Optional[str] = None,
        collect_metrics: Optional[bool] = True,
        **kwargs,
    ):
        """
        Initializes a configuration class instance for an App. This is the simplest form of an embedchain app.
        Most of the configuration is done in the `App` class itself.

        :param log_level: Debug level ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], defaults to "WARNING"
        :type log_level: str, optional
        :param id: ID of the app. Document metadata will have this id., defaults to None
        :type id: Optional[str], optional
        :param name: Name of the app, defaults to None
        :type name: Optional[str], optional
        :param collect_metrics: Send anonymous telemetry to improve embedchain, defaults to True
        :type collect_metrics: Optional[bool], optional
        """
        self.name = name
        super().__init__(log_level=log_level, id=id, collect_metrics=collect_metrics, **kwargs)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/base_config.py | embedchain/embedchain/config/base_config.py | from typing import Any
from embedchain.helpers.json_serializable import JSONSerializable
class BaseConfig(JSONSerializable):
    """Common base for all embedchain configuration classes."""

    def __init__(self):
        """No shared state is set up at the base level."""
        pass

    def as_dict(self) -> dict[str, Any]:
        """Expose this config's attributes as a plain dictionary.

        :return: mapping of attribute names to their current values
        :rtype: dict[str, Any]
        """
        return self.__dict__
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/add_config.py | embedchain/embedchain/config/add_config.py | import builtins
import logging
from collections.abc import Callable
from importlib import import_module
from typing import Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ChunkerConfig(BaseConfig):
    """Configuration for the text chunker used by the `add` method."""

    def __init__(
        self,
        chunk_size: Optional[int] = 2000,
        chunk_overlap: Optional[int] = 0,
        length_function: Optional[Callable[[str], int]] = None,
        min_chunk_size: Optional[int] = 0,
    ):
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.min_chunk_size = min_chunk_size
        # A minimum at or above the chunk size would leave no valid chunks at all.
        if self.min_chunk_size >= self.chunk_size:
            raise ValueError(f"min_chunk_size {min_chunk_size} should be less than chunk_size {chunk_size}")
        if self.min_chunk_size < self.chunk_overlap:
            logging.warning(
                f"min_chunk_size {min_chunk_size} should be greater than chunk_overlap {chunk_overlap}, otherwise it is redundant."  # noqa:E501
            )
        if isinstance(length_function, str):
            # Dotted-path strings are resolved to the callable they name.
            resolved = self.load_func(length_function)
        else:
            resolved = length_function or len
        self.length_function = resolved

    @staticmethod
    def load_func(dotpath: str):
        """Resolve a builtin name or a dotted ``module.attr`` path to an object."""
        if "." not in dotpath:
            return getattr(builtins, dotpath)
        module_name, _, attr = dotpath.rpartition(".")
        return getattr(import_module(module_name), attr)
@register_deserializable
class LoaderConfig(BaseConfig):
    """Configuration for the loader used by the `add` method."""

    def __init__(self):
        # Loaders currently take no configurable options.
        pass
@register_deserializable
class AddConfig(BaseConfig):
    """Bundle of per-call settings for the `add` method."""

    def __init__(
        self,
        chunker: Optional[ChunkerConfig] = None,
        loader: Optional[LoaderConfig] = None,
    ):
        """Create an `add` configuration.

        :param chunker: chunker settings to apply, or None for defaults
        :type chunker: Optional[ChunkerConfig], optional
        :param loader: loader settings to apply, or None for defaults
        :type loader: Optional[LoaderConfig], optional
        """
        self.loader = loader
        self.chunker = chunker
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/base_app_config.py | embedchain/embedchain/config/base_app_config.py | import logging
from typing import Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import JSONSerializable
from embedchain.vectordb.base import BaseVectorDB
logger = logging.getLogger(__name__)
class BaseAppConfig(BaseConfig, JSONSerializable):
    """
    Parent config to initialize an instance of `App`.
    """

    def __init__(
        self,
        log_level: str = "WARNING",
        db: Optional[BaseVectorDB] = None,
        id: Optional[str] = None,
        collect_metrics: bool = True,
        collection_name: Optional[str] = None,
    ):
        """
        Initializes a configuration class instance for an App.

        Most of the configuration is done in the `App` class itself.

        :param log_level: Debug level ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], defaults to "WARNING"
        :type log_level: str, optional
        :param db: A database class. It is recommended to set this directly in the `App` class, not this config,
        defaults to None
        :type db: Optional[BaseVectorDB], optional
        :param id: ID of the app. Document metadata will have this id., defaults to None
        :type id: Optional[str], optional
        :param collect_metrics: Send anonymous telemetry to improve embedchain, defaults to True
        :type collect_metrics: Optional[bool], optional
        :param collection_name: Default collection name. It's recommended to use app.db.set_collection_name() instead,
        defaults to None
        :type collection_name: Optional[str], optional
        """
        self.id = id
        # Metrics stay enabled unless the caller passes an explicit falsy value other than None.
        self.collect_metrics = True if (collect_metrics is True or collect_metrics is None) else False
        self.collection_name = collection_name
        if db:
            self._db = db
            logger.warning(
                "DEPRECATION WARNING: Please supply the database as the second parameter during app init. "
                "Such as `app(config=config, db=db)`."
            )
        if collection_name:
            logger.warning("DEPRECATION WARNING: Please supply the collection name to the database config.")
        return

    def _setup_logging(self, log_level):
        """Configure root logging and attach a module logger to this config.

        Fix: the previous implementation called ``logger.basicConfig`` and
        ``logger.getLogger`` on the module-level ``Logger`` instance, which
        has neither attribute and would raise ``AttributeError``; the
        ``logging`` module itself must be used.
        """
        logging.basicConfig(format="%(asctime)s [%(name)s] [%(levelname)s] %(message)s", level=log_level)
        self.logger = logging.getLogger(__name__)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/__init__.py | embedchain/embedchain/config/__init__.py | # flake8: noqa: F401
from .add_config import AddConfig, ChunkerConfig
from .app_config import AppConfig
from .base_config import BaseConfig
from .cache_config import CacheConfig
from .embedder.base import BaseEmbedderConfig
from .embedder.base import BaseEmbedderConfig as EmbedderConfig
from .embedder.ollama import OllamaEmbedderConfig
from .llm.base import BaseLlmConfig
from .mem0_config import Mem0Config
from .vector_db.chroma import ChromaDbConfig
from .vector_db.elasticsearch import ElasticsearchDBConfig
from .vector_db.opensearch import OpenSearchDBConfig
from .vector_db.zilliz import ZillizDBConfig
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/mem0_config.py | embedchain/embedchain/config/mem0_config.py | from typing import Any, Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class Mem0Config(BaseConfig):
    """Configuration for the Mem0 memory client."""

    def __init__(self, api_key: str, top_k: Optional[int] = 10):
        """
        :param api_key: API key used to authenticate against the Mem0 service
        :type api_key: str
        :param top_k: number of memories to retrieve per query, defaults to 10
        :type top_k: Optional[int], optional
        """
        self.api_key = api_key
        self.top_k = top_k

    @staticmethod
    def from_config(config: Optional[dict[str, Any]]):
        """Build a Mem0Config from a plain dict (e.g. parsed YAML).

        :param config: mapping with optional "api_key" and "top_k" keys, or None
        :return: a populated Mem0Config instance
        """
        if config is None:
            # Fix: `api_key` is a required positional parameter, so a bare
            # Mem0Config() call raised TypeError; fall back to an empty key.
            return Mem0Config(api_key="")
        # Fix: this previously passed `init_config=`, which __init__ does not
        # accept (TypeError); the "top_k" entry maps to the `top_k` parameter.
        return Mem0Config(
            api_key=config.get("api_key", ""),
            top_k=config.get("top_k", 10),
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/cache_config.py | embedchain/embedchain/config/cache_config.py | from typing import Any, Optional
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class CacheSimilarityEvalConfig(BaseConfig):
    """
    Evaluator settings for comparing two embeddings by the distance computed
    during embedding retrieval.

    In the retrieval stage, `search_result` is the distance used for approximate
    nearest neighbor search and is stored in `cache_dict`. `max_distance` bounds
    that distance to the range [0, `max_distance`]. `positive` indicates the
    distance is directly proportional to the similarity of the two entities;
    when `positive` is False, the distance is subtracted from `max_distance` to
    produce the final score.

    :param strategy: evaluation strategy name, defaults to "distance"
    :type strategy: Optional[str], optional
    :param max_distance: upper bound on the distance
    :type max_distance: float
    :param positive: True when a larger distance means more similar
    :type positive: bool
    """

    def __init__(
        self,
        strategy: Optional[str] = "distance",
        max_distance: Optional[float] = 1.0,
        positive: Optional[bool] = False,
    ):
        self.strategy = strategy
        self.max_distance = max_distance
        self.positive = positive

    @staticmethod
    def from_config(config: Optional[dict[str, Any]]):
        """Create an instance from a plain dict, filling in defaults."""
        if config is None:
            return CacheSimilarityEvalConfig()
        return CacheSimilarityEvalConfig(
            strategy=config.get("strategy", "distance"),
            max_distance=config.get("max_distance", 1.0),
            positive=config.get("positive", False),
        )
@register_deserializable
class CacheInitConfig(BaseConfig):
    """
    Settings applied when a cache is first initialized.

    :param similarity_threshold: score in [0, 1] above which a search result
        counts as a hit; 0 yields no hits, 1 accepts every result.
    :type similarity_threshold: float
    :param auto_flush: flush automatically after this many additions, default 20
    :type auto_flush: int
    """

    def __init__(
        self,
        similarity_threshold: Optional[float] = 0.8,
        auto_flush: Optional[int] = 20,
    ):
        # Reject thresholds outside the inclusive [0, 1] range.
        if not 0 <= similarity_threshold <= 1:
            raise ValueError(f"similarity_threshold {similarity_threshold} should be between 0 and 1")
        self.similarity_threshold = similarity_threshold
        self.auto_flush = auto_flush

    @staticmethod
    def from_config(config: Optional[dict[str, Any]]):
        """Create an instance from a plain dict, filling in defaults."""
        if config is None:
            return CacheInitConfig()
        return CacheInitConfig(
            similarity_threshold=config.get("similarity_threshold", 0.8),
            auto_flush=config.get("auto_flush", 20),
        )
@register_deserializable
class CacheConfig(BaseConfig):
    """Top-level cache configuration: similarity evaluation + init settings."""

    def __init__(
        self,
        similarity_eval_config: Optional[CacheSimilarityEvalConfig] = None,
        init_config: Optional[CacheInitConfig] = None,
    ):
        """
        :param similarity_eval_config: similarity evaluator settings; a fresh
            default config is created when None
        :type similarity_eval_config: Optional[CacheSimilarityEvalConfig], optional
        :param init_config: cache initialization settings; a fresh default
            config is created when None
        :type init_config: Optional[CacheInitConfig], optional
        """
        # Fix: the defaults were mutable config instances created once at
        # class-definition time and silently shared by every CacheConfig;
        # build a per-instance default instead.
        self.similarity_eval_config = (
            similarity_eval_config if similarity_eval_config is not None else CacheSimilarityEvalConfig()
        )
        self.init_config = init_config if init_config is not None else CacheInitConfig()

    @staticmethod
    def from_config(config: Optional[dict[str, Any]]):
        """Create an instance from a plain dict, filling in defaults."""
        if config is None:
            return CacheConfig()
        return CacheConfig(
            similarity_eval_config=CacheSimilarityEvalConfig.from_config(config.get("similarity_evaluation", {})),
            init_config=CacheInitConfig.from_config(config.get("init_config", {})),
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vectordb/__init__.py | embedchain/embedchain/config/vectordb/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/llm/__init__.py | embedchain/embedchain/config/llm/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/llm/base.py | embedchain/embedchain/config/llm/base.py | import json
import logging
import re
from pathlib import Path
from string import Template
from typing import Any, Dict, Mapping, Optional, Union
import httpx
from embedchain.config.base_config import BaseConfig
from embedchain.helpers.json_serializable import register_deserializable
logger = logging.getLogger(__name__)
DEFAULT_PROMPT = """
You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Query: $query
Answer:
""" # noqa:E501
DEFAULT_PROMPT_WITH_HISTORY = """
You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. You are also provided with the conversation history with the user. Make sure to use relevant context from conversation history as needed.
Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Conversation history:
----------------------
$history
----------------------
Query: $query
Answer:
""" # noqa:E501
DEFAULT_PROMPT_WITH_MEM0_MEMORY = """
You are an expert at answering questions based on provided memories. You are also provided with the context and conversation history of the user. Make sure to use relevant context from conversation history and context as needed.
Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. Take into consideration the conversation history and context provided.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Striclty return the query exactly as it is if it is not a question or if no relevant information is found.
Context information:
----------------------
$context
----------------------
Conversation history:
----------------------
$history
----------------------
Memories/Preferences:
----------------------
$memories
----------------------
Query: $query
Answer:
""" # noqa:E501
DOCS_SITE_DEFAULT_PROMPT = """
You are an expert AI assistant for developer support product. Your responses must always be rooted in the context provided for each query. Wherever possible, give complete code snippet. Dont make up any code snippet on your own.
Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response.
2. The context should silently guide your answers without being directly acknowledged.
3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Context information:
----------------------
$context
----------------------
Query: $query
Answer:
""" # noqa:E501
# Pre-built string.Template objects for each prompt variant defined above.
DEFAULT_PROMPT_TEMPLATE = Template(DEFAULT_PROMPT)
DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE = Template(DEFAULT_PROMPT_WITH_HISTORY)
DEFAULT_PROMPT_WITH_MEM0_MEMORY_TEMPLATE = Template(DEFAULT_PROMPT_WITH_MEM0_MEMORY)
DOCS_SITE_PROMPT_TEMPLATE = Template(DOCS_SITE_DEFAULT_PROMPT)

# Patterns matching the `$query` / `$context` / `$history` placeholders
# (with or without braces) inside a user-supplied prompt template.
query_re = re.compile(r"\$\{*query\}*")
context_re = re.compile(r"\$\{*context\}*")
history_re = re.compile(r"\$\{*history\}*")
@register_deserializable
class BaseLlmConfig(BaseConfig):
    """
    Config for the `query` method.
    """

    def __init__(
        self,
        number_documents: int = 3,
        template: Optional[Template] = None,
        prompt: Optional[Template] = None,
        model: Optional[str] = None,
        temperature: float = 0,
        max_tokens: int = 1000,
        top_p: float = 1,
        stream: bool = False,
        online: bool = False,
        token_usage: bool = False,
        deployment_name: Optional[str] = None,
        system_prompt: Optional[str] = None,
        where: dict[str, Any] = None,
        query_type: Optional[str] = None,
        callbacks: Optional[list] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        endpoint: Optional[str] = None,
        model_kwargs: Optional[dict[str, Any]] = None,
        http_client_proxies: Optional[Union[Dict, str]] = None,
        http_async_client_proxies: Optional[Union[Dict, str]] = None,
        local: Optional[bool] = False,
        default_headers: Optional[Mapping[str, str]] = None,
        api_version: Optional[str] = None,
    ):
        """
        Initializes a configuration class instance for the LLM.

        Takes the place of the former `QueryConfig` or `ChatConfig`.

        :param number_documents: Number of documents to pull from the database as
        context, defaults to 1
        :type number_documents: int, optional
        :param template: The `Template` instance to use as a template for
        prompt, defaults to None (deprecated)
        :type template: Optional[Template], optional
        :param prompt: The `Template` instance to use as a template for
        prompt, defaults to None
        :type prompt: Optional[Template], optional
        :param model: Controls the OpenAI model used, defaults to None
        :type model: Optional[str], optional
        :param temperature: Controls the randomness of the model's output.
        Higher values (closer to 1) make output more random, lower values make it more deterministic, defaults to 0
        :type temperature: float, optional
        :param max_tokens: Controls how many tokens are generated, defaults to 1000
        :type max_tokens: int, optional
        :param top_p: Controls the diversity of words. Higher values (closer to 1) make word selection more diverse,
        defaults to 1
        :type top_p: float, optional
        :param stream: Control if response is streamed back to user, defaults to False
        :type stream: bool, optional
        :param online: Controls whether to use internet for answering query, defaults to False
        :type online: bool, optional
        :param token_usage: Controls whether to return token usage in response, defaults to False
        :type token_usage: bool, optional
        :param deployment_name: t.b.a., defaults to None
        :type deployment_name: Optional[str], optional
        :param system_prompt: System prompt string, defaults to None
        :type system_prompt: Optional[str], optional
        :param where: A dictionary of key-value pairs to filter the database results., defaults to None
        :type where: dict[str, Any], optional
        :param api_key: The api key of the custom endpoint, defaults to None
        :type api_key: Optional[str], optional
        :param endpoint: The api url of the custom endpoint, defaults to None
        :type endpoint: Optional[str], optional
        :param model_kwargs: A dictionary of key-value pairs to pass to the model, defaults to None
        :type model_kwargs: Optional[Dict[str, Any]], optional
        :param callbacks: Langchain callback functions to use, defaults to None
        :type callbacks: Optional[list], optional
        :param query_type: The type of query to use, defaults to None
        :type query_type: Optional[str], optional
        :param http_client_proxies: The proxy server settings used to create self.http_client, defaults to None
        :type http_client_proxies: Optional[Dict | str], optional
        :param http_async_client_proxies: The proxy server settings for async calls used to create
        self.http_async_client, defaults to None
        :type http_async_client_proxies: Optional[Dict | str], optional
        :param local: If True, the model will be run locally, defaults to False (for huggingface provider)
        :type local: Optional[bool], optional
        :param default_headers: Set additional HTTP headers to be sent with requests to OpenAI
        :type default_headers: Optional[Mapping[str, str]], optional
        :param api_version: API version string for providers that require one, defaults to None
        :type api_version: Optional[str], optional
        :raises ValueError: If the template is not valid as template should
        contain $context and $query (and optionally $history)
        :raises ValueError: Stream is not boolean
        """
        if template is not None:
            logger.warning(
                "The `template` argument is deprecated and will be removed in a future version. "
                + "Please use `prompt` instead."
            )
            if prompt is None:
                prompt = template

        if prompt is None:
            prompt = DEFAULT_PROMPT_TEMPLATE

        self.number_documents = number_documents
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.model = model
        self.top_p = top_p
        self.online = online
        self.token_usage = token_usage
        self.deployment_name = deployment_name
        self.system_prompt = system_prompt
        self.query_type = query_type
        self.callbacks = callbacks
        self.api_key = api_key
        self.base_url = base_url
        self.endpoint = endpoint
        self.model_kwargs = model_kwargs
        # Proxy-aware HTTP clients are only created when proxies are configured.
        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
        self.http_async_client = (
            httpx.AsyncClient(proxies=http_async_client_proxies) if http_async_client_proxies else None
        )
        self.local = local
        self.default_headers = default_headers
        # Fix: `self.online = online` was previously assigned a second time
        # here; the redundant duplicate has been removed.
        self.api_version = api_version

        if token_usage:
            # Fix: open the pricing file with a context manager so the file
            # handle is closed deterministically (it was previously leaked).
            pricing_file = Path(__file__).resolve().parent.parent / "model_prices_and_context_window.json"
            with pricing_file.open() as fp:
                self.model_pricing_map = json.load(fp)

        if isinstance(prompt, str):
            prompt = Template(prompt)

        if self.validate_prompt(prompt):
            self.prompt = prompt
        else:
            raise ValueError("The 'prompt' should have 'query' and 'context' keys and potentially 'history' (if used).")

        if not isinstance(stream, bool):
            raise ValueError("`stream` should be bool")
        self.stream = stream
        self.where = where

    @staticmethod
    def validate_prompt(prompt: Template) -> Optional[re.Match[str]]:
        """
        validate the prompt

        :param prompt: the prompt to validate
        :type prompt: Template
        :return: valid (true) or invalid (false)
        :rtype: Optional[re.Match[str]]
        """
        return re.search(query_re, prompt.template) and re.search(context_re, prompt.template)

    @staticmethod
    def _validate_prompt_history(prompt: Template) -> Optional[re.Match[str]]:
        """
        validate the prompt with history

        :param prompt: the prompt to validate
        :type prompt: Template
        :return: valid (true) or invalid (false)
        :rtype: Optional[re.Match[str]]
        """
        return re.search(history_re, prompt.template)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/lancedb.py | embedchain/embedchain/config/vector_db/lancedb.py | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class LanceDBConfig(BaseVectorDbConfig):
    """Configuration options for the LanceDB vector database."""

    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        host: Optional[str] = None,
        port: Optional[str] = None,
        allow_reset=True,
    ):
        """
        Initializes a configuration class instance for LanceDB.

        :param collection_name: Default name for the collection, defaults to None
        :type collection_name: Optional[str], optional
        :param dir: Path to the database directory, where the database is stored, defaults to None
        :type dir: Optional[str], optional
        :param host: Database connection remote host. Use this if you run Embedchain as a client, defaults to None
        :type host: Optional[str], optional
        :param port: Database connection remote port. Use this if you run Embedchain as a client, defaults to None
        :type port: Optional[str], optional
        :param allow_reset: Resets the database. defaults to True
        :type allow_reset: bool
        """
        self.allow_reset = allow_reset
        super().__init__(collection_name=collection_name, dir=dir, host=host, port=port)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/opensearch.py | embedchain/embedchain/config/vector_db/opensearch.py | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class OpenSearchDBConfig(BaseVectorDbConfig):
    """Configuration options for connecting to an OpenSearch cluster."""

    def __init__(
        self,
        opensearch_url: str,
        http_auth: tuple[str, str],
        vector_dimension: int = 1536,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        batch_size: Optional[int] = 100,
        **extra_params: dict[str, any],
    ):
        """
        Initializes a configuration class instance for an OpenSearch client.

        :param collection_name: Default name for the collection, defaults to None
        :type collection_name: Optional[str], optional
        :param opensearch_url: URL of the OpenSearch domain
        :type opensearch_url: str, Eg, "http://localhost:9200"
        :param http_auth: Tuple of username and password
        :type http_auth: tuple[str, str], Eg, ("username", "password")
        :param vector_dimension: Dimension of the vector, defaults to 1536 (openai embedding model)
        :type vector_dimension: int, optional
        :param dir: Path to the database directory, where the database is stored, defaults to None
        :type dir: Optional[str], optional
        :param batch_size: Number of items to insert in one batch, defaults to 100
        :type batch_size: Optional[int], optional
        :param extra_params: extra keyword arguments forwarded to the OpenSearch client
        :type extra_params: dict[str, Any], optional
        """
        self.opensearch_url = opensearch_url
        self.http_auth = http_auth
        self.vector_dimension = vector_dimension
        self.extra_params = extra_params
        self.batch_size = batch_size
        super().__init__(collection_name=collection_name, dir=dir)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/chroma.py | embedchain/embedchain/config/vector_db/chroma.py | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ChromaDbConfig(BaseVectorDbConfig):
    """Configuration options for the ChromaDB vector database."""

    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        host: Optional[str] = None,
        port: Optional[str] = None,
        batch_size: Optional[int] = 100,
        allow_reset=False,
        chroma_settings: Optional[dict] = None,
    ):
        """
        Set up a ChromaDB configuration.

        :param collection_name: default collection name, defaults to None
        :type collection_name: Optional[str], optional
        :param dir: directory where the database is stored, defaults to None
        :type dir: Optional[str], optional
        :param host: remote host when running Embedchain as a client, defaults to None
        :type host: Optional[str], optional
        :param port: remote port when running Embedchain as a client, defaults to None
        :type port: Optional[str], optional
        :param batch_size: number of items inserted per batch, defaults to 100
        :type batch_size: Optional[int], optional
        :param allow_reset: whether resetting the database is permitted, defaults to False
        :type allow_reset: bool
        :param chroma_settings: extra Chroma settings dict, defaults to None
        :type chroma_settings: Optional[dict], optional
        """
        self.allow_reset = allow_reset
        self.batch_size = batch_size
        self.chroma_settings = chroma_settings
        super().__init__(collection_name=collection_name, dir=dir, host=host, port=port)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/base.py | embedchain/embedchain/config/vector_db/base.py | from typing import Optional
from embedchain.config.base_config import BaseConfig
class BaseVectorDbConfig(BaseConfig):
    """Shared configuration base for all vector-database backends."""

    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: str = "db",
        host: Optional[str] = None,
        port: Optional[str] = None,
        **kwargs,
    ):
        """
        Set up a vector-database configuration.

        :param collection_name: default collection name; falls back to
            "embedchain_store" when not provided
        :type collection_name: Optional[str], optional
        :param dir: directory where the database is stored, defaults to "db"
        :type dir: str, optional
        :param host: remote host when running Embedchain as a client, defaults to None
        :type host: Optional[str], optional
        :param port: remote port when running Embedchain as a client, defaults to None
        :type port: Optional[str], optional
        :param kwargs: additional keyword arguments, attached verbatim as attributes
        :type kwargs: dict
        """
        self.collection_name = collection_name or "embedchain_store"
        self.dir = dir
        self.host = host
        self.port = port
        # Any extra keyword arguments become attributes on the config object.
        for key, value in kwargs.items():
            setattr(self, key, value)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/pinecone.py | embedchain/embedchain/config/vector_db/pinecone.py | import os
from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class PineconeDBConfig(BaseVectorDbConfig):
    """Configuration options for the Pinecone vector database."""

    def __init__(
        self,
        index_name: Optional[str] = None,
        api_key: Optional[str] = None,
        vector_dimension: int = 1536,
        metric: Optional[str] = "cosine",
        pod_config: Optional[dict[str, any]] = None,
        serverless_config: Optional[dict[str, any]] = None,
        hybrid_search: bool = False,
        bm25_encoder: any = None,
        batch_size: Optional[int] = 100,
        **extra_params: dict[str, any],
    ):
        """
        Set up a Pinecone configuration.

        :param index_name: name of the Pinecone index, defaults to None
        :param api_key: Pinecone API key, defaults to None
        :param vector_dimension: dimension of the stored vectors, defaults to 1536
        :param metric: similarity metric, defaults to "cosine"
        :param pod_config: pod-based deployment spec, defaults to None
        :param serverless_config: serverless deployment spec, defaults to None
        :param hybrid_search: enable hybrid (dense + sparse) search, defaults to False
        :param bm25_encoder: sparse encoder used for hybrid search, defaults to None
        :param batch_size: number of items to insert in one batch, defaults to 100
        :param extra_params: extra keyword arguments forwarded to the client
        :raises ValueError: if both pod_config and serverless_config are given,
            or hybrid search is requested with a metric other than "dotproduct"
        """
        self.metric = metric
        self.api_key = api_key
        self.index_name = index_name
        self.vector_dimension = vector_dimension
        self.extra_params = extra_params
        self.hybrid_search = hybrid_search
        self.bm25_encoder = bm25_encoder
        self.batch_size = batch_size
        # Fix: always assign serverless_config so the attribute exists even
        # when the default pod spec is used below (previously it was left
        # unset in that branch, making the mutual-exclusion check raise
        # AttributeError).
        self.serverless_config = serverless_config
        if pod_config is None and serverless_config is None:
            # If no config is provided, use the default pod spec config
            pod_environment = os.environ.get("PINECONE_ENV", "gcp-starter")
            self.pod_config = {"environment": pod_environment, "metadata_config": {"indexed": ["*"]}}
        else:
            self.pod_config = pod_config
        if self.pod_config and self.serverless_config:
            raise ValueError("Only one of pod_config or serverless_config can be provided.")

        if self.hybrid_search and self.metric != "dotproduct":
            raise ValueError(
                "Hybrid search is only supported with dotproduct metric in Pinecone. See full docs here: https://docs.pinecone.io/docs/hybrid-search#limitations"
            )  # noqa:E501

        super().__init__(collection_name=self.index_name, dir=None)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/weaviate.py | embedchain/embedchain/config/vector_db/weaviate.py | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class WeaviateDBConfig(BaseVectorDbConfig):
    """Configuration options for the Weaviate vector database."""

    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        batch_size: Optional[int] = 100,
        **extra_params: dict[str, any],
    ):
        """
        Set up a Weaviate configuration.

        :param collection_name: default collection name, defaults to None
        :type collection_name: Optional[str], optional
        :param dir: directory where the database is stored, defaults to None
        :type dir: Optional[str], optional
        :param batch_size: number of items inserted per batch, defaults to 100
        :type batch_size: Optional[int], optional
        :param extra_params: extra keyword arguments forwarded to the client
        """
        self.batch_size = batch_size
        self.extra_params = extra_params
        super().__init__(collection_name=collection_name, dir=dir)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/elasticsearch.py | embedchain/embedchain/config/vector_db/elasticsearch.py | import os
from typing import Optional, Union
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ElasticsearchDBConfig(BaseVectorDbConfig):
    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        es_url: Union[str, list[str]] = None,
        cloud_id: Optional[str] = None,
        batch_size: Optional[int] = 100,
        **ES_EXTRA_PARAMS: dict[str, any],
    ):
        """
        Initializes a configuration class instance for an Elasticsearch client.

        :param collection_name: Default name for the collection, defaults to None
        :type collection_name: Optional[str], optional
        :param dir: Path to the database directory, where the database is stored, defaults to None
        :type dir: Optional[str], optional
        :param es_url: elasticsearch url or list of nodes url to be used for connection, defaults to None
        :type es_url: Union[str, list[str]], optional
        :param cloud_id: cloud id of the elasticsearch cluster, defaults to None
        :type cloud_id: Optional[str], optional
        :param batch_size: Number of items to insert in one batch, defaults to 100
        :type batch_size: Optional[int], optional
        :param ES_EXTRA_PARAMS: extra params dict that can be passed to elasticsearch.
        :type ES_EXTRA_PARAMS: dict[str, Any], optional
        :raises ValueError: when both `es_url` and `cloud_id` are given
        :raises AttributeError: when neither a URL nor a cloud id is available
        """
        if es_url and cloud_id:
            raise ValueError("Only one of `es_url` and `cloud_id` can be set.")
        self.ES_URL = es_url or os.environ.get("ELASTICSEARCH_URL")
        self.CLOUD_ID = cloud_id or os.environ.get("ELASTICSEARCH_CLOUD_ID")
        if not self.ES_URL and not self.CLOUD_ID:
            raise AttributeError(
                "Elasticsearch needs a URL or CLOUD_ID attribute, "
                "this can either be passed to `ElasticsearchDBConfig` or as `ELASTICSEARCH_URL` or `ELASTICSEARCH_CLOUD_ID` in `.env`"  # noqa: E501
            )
        self.ES_EXTRA_PARAMS = ES_EXTRA_PARAMS
        # Load the API key from the environment only when no auth was passed
        # explicitly; the ES client accepts at most one of 'api_key',
        # 'basic_auth' and 'bearer_auth'.
        if (
            not self.ES_EXTRA_PARAMS.get("api_key")
            and not self.ES_EXTRA_PARAMS.get("basic_auth")
            and not self.ES_EXTRA_PARAMS.get("bearer_auth")
        ):
            env_api_key = os.environ.get("ELASTICSEARCH_API_KEY")
            if env_api_key:
                # Only inject the key when it is actually set, so an explicit
                # `api_key=None` is never forwarded to the Elasticsearch client.
                self.ES_EXTRA_PARAMS["api_key"] = env_api_key
        self.batch_size = batch_size
        super().__init__(collection_name=collection_name, dir=dir)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/zilliz.py | embedchain/embedchain/config/vector_db/zilliz.py | import os
from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class ZillizDBConfig(BaseVectorDbConfig):
    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        uri: Optional[str] = None,
        token: Optional[str] = None,
        vector_dim: Optional[str] = None,
        metric_type: Optional[str] = None,
    ):
        """
        Initializes a configuration class instance for the vector database.

        :param collection_name: Default name for the collection, defaults to None
        :type collection_name: Optional[str], optional
        :param dir: Path to the database directory, where the database is stored, defaults to "db"
        :type dir: str, optional
        :param uri: Cluster endpoint obtained from the Zilliz Console, defaults to None
        :type uri: Optional[str], optional
        :param token: API Key, if a Serverless Cluster, username:password, if a Dedicated Cluster, defaults to None
        :type token: Optional[str], optional
        :param vector_dim: dimensionality of the stored vectors, defaults to None
        :type vector_dim: Optional[str], optional
        :param metric_type: distance metric used by the index, defaults to "L2"
        :type metric_type: Optional[str], optional
        :raises AttributeError: when no URI or token can be resolved
        """
        self.uri = uri or os.environ.get("ZILLIZ_CLOUD_URI")
        if not self.uri:
            # Error messages fixed: they used to name the env var twice instead
            # of telling the user the value can be passed to this config class
            # (consistent with ElasticsearchDBConfig's message).
            raise AttributeError(
                "Zilliz needs a URI attribute, "
                "this can either be passed to `ZillizDBConfig` or as `ZILLIZ_CLOUD_URI` in `.env`"
            )
        self.token = token or os.environ.get("ZILLIZ_CLOUD_TOKEN")
        if not self.token:
            raise AttributeError(
                "Zilliz needs a token attribute, "
                "this can either be passed to `ZillizDBConfig` or as `ZILLIZ_CLOUD_TOKEN` in `.env`,"
                "if having a username and password, pass it in the form 'username:password' to `ZILLIZ_CLOUD_TOKEN`"
            )
        self.metric_type = metric_type or "L2"
        self.vector_dim = vector_dim
        super().__init__(collection_name=collection_name, dir=dir)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/vector_db/qdrant.py | embedchain/embedchain/config/vector_db/qdrant.py | from typing import Optional
from embedchain.config.vector_db.base import BaseVectorDbConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class QdrantDBConfig(BaseVectorDbConfig):
    """
    Config to initialize a qdrant client.

    :param: url. qdrant url or list of nodes url to be used for connection
    """

    def __init__(
        self,
        collection_name: Optional[str] = None,
        dir: Optional[str] = None,
        hnsw_config: Optional[dict[str, any]] = None,
        quantization_config: Optional[dict[str, any]] = None,
        on_disk: Optional[bool] = None,
        batch_size: Optional[int] = 10,
        **extra_params: dict[str, any],
    ):
        """
        Initializes a configuration class instance for a qdrant client.

        :param collection_name: default name for the collection, defaults to None
        :param dir: path to the database directory, defaults to None
        :param hnsw_config: parameters for the HNSW index, defaults to None
        :param quantization_config: quantization parameters; quantization is
            disabled when None
        :param on_disk: when True the point payload is read from disk instead of
            being kept in memory (payload values used in indexed filters stay in
            RAM), defaults to None
        :param batch_size: number of items to insert per batch, defaults to 10
        :param extra_params: extra keyword arguments forwarded to the qdrant client
        """
        # Index/storage tuning options are stored verbatim; the base class
        # handles the collection name and directory.
        self.extra_params = extra_params
        self.batch_size = batch_size
        self.on_disk = on_disk
        self.quantization_config = quantization_config
        self.hnsw_config = hnsw_config
        super().__init__(collection_name=collection_name, dir=dir)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/evaluation/__init__.py | embedchain/embedchain/config/evaluation/__init__.py | from .base import ( # noqa: F401
AnswerRelevanceConfig,
ContextRelevanceConfig,
GroundednessConfig,
)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/evaluation/base.py | embedchain/embedchain/config/evaluation/base.py | from typing import Optional
from embedchain.config.base_config import BaseConfig
# Prompt templates for the evaluation metrics. They are filled in with
# string.Template.substitute(), so every `$name` token is a substitution slot.
# NOTE(review): the wording below (including the "symantically" typo and the
# "if are not able" phrasing) is runtime prompt text sent to the LLM; changing
# it changes metric behavior, so it is left as-is here.

# AnswerRelevance: ask the LLM to generate questions from an answer.
ANSWER_RELEVANCY_PROMPT = """
Please provide $num_gen_questions questions from the provided answer.
You must provide the complete question, if are not able to provide the complete question, return empty string ("").
Please only provide one question per line without numbers or bullets to distinguish them.
You must only provide the questions and no other text.
$answer
"""  # noqa:E501

# ContextRelevance: ask the LLM to extract only the context sentences that are
# needed to answer the question.
CONTEXT_RELEVANCY_PROMPT = """
Please extract relevant sentences from the provided context that is required to answer the given question.
If no relevant sentences are found, or if you believe the question cannot be answered from the given context, return the empty string ("").
While extracting candidate sentences you're not allowed to make any changes to sentences from given context or make up any sentences.
You must only provide sentences from the given context and nothing else.
Context: $context
Question: $question
"""  # noqa:E501

# Groundedness step 1: turn the answer into individual claim statements.
GROUNDEDNESS_ANSWER_CLAIMS_PROMPT = """
Please provide one or more statements from each sentence of the provided answer.
You must provide the symantically equivalent statements for each sentence of the answer.
You must provide the complete statement, if are not able to provide the complete statement, return empty string ("").
Please only provide one statement per line WITHOUT numbers or bullets.
If the question provided is not being answered in the provided answer, return empty string ("").
You must only provide the statements and no other text.
$question
$answer
"""  # noqa:E501

# Groundedness step 2: ask for a 1/0/-1 verdict per claim against the context.
GROUNDEDNESS_CLAIMS_INFERENCE_PROMPT = """
Given the context and the provided claim statements, please provide a verdict for each claim statement whether it can be completely inferred from the given context or not.
Use only "1" (yes), "0" (no) and "-1" (null) for "yes", "no" or "null" respectively.
You must provide one verdict per line, ONLY WITH "1", "0" or "-1" as per your verdict to the given statement and nothing else.
You must provide the verdicts in the same order as the claim statements.
Contexts:
$context
Claim statements:
$claim_statements
"""  # noqa:E501
class GroundednessConfig(BaseConfig):
    """Configuration for the Groundedness evaluation metric.

    Holds the LLM model, the API key and the two prompt templates used to
    (1) extract claim statements from an answer and (2) infer per-claim
    verdicts against the retrieved context.
    """

    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        answer_claims_prompt: str = GROUNDEDNESS_ANSWER_CLAIMS_PROMPT,
        claims_inference_prompt: str = GROUNDEDNESS_CLAIMS_INFERENCE_PROMPT,
    ):
        """
        :param model: LLM used for claim extraction and inference, defaults to "gpt-4"
        :param api_key: OpenAI API key; the metric falls back to the environment when None
        :param answer_claims_prompt: template for extracting claim statements from an answer
        :param claims_inference_prompt: template for scoring claims against the context
        """
        self.model = model
        self.api_key = api_key
        self.answer_claims_prompt = answer_claims_prompt
        self.claims_inference_prompt = claims_inference_prompt
class AnswerRelevanceConfig(BaseConfig):
    """Configuration for the AnswerRelevance evaluation metric."""

    def __init__(
        self,
        model: str = "gpt-4",
        embedder: str = "text-embedding-ada-002",
        api_key: Optional[str] = None,
        num_gen_questions: int = 1,
        prompt: str = ANSWER_RELEVANCY_PROMPT,
    ):
        """
        :param model: LLM used to generate questions from the answer
        :param embedder: embedding model used for the similarity computation
        :param api_key: OpenAI API key; the metric falls back to the environment when None
        :param num_gen_questions: number of questions to generate per answer
        :param prompt: template used to ask the LLM for questions
        """
        self.model = model
        self.embedder = embedder
        self.api_key = api_key
        self.num_gen_questions = num_gen_questions
        self.prompt = prompt
class ContextRelevanceConfig(BaseConfig):
    """Configuration for the ContextRelevance evaluation metric."""

    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        language: str = "en",
        prompt: str = CONTEXT_RELEVANCY_PROMPT,
    ):
        """
        :param model: LLM used to extract relevant context sentences
        :param api_key: OpenAI API key; the metric falls back to the environment when None
        :param language: language code passed to the pysbd sentence segmenter
        :param prompt: template used to ask the LLM for relevant sentences
        """
        self.model = model
        self.api_key = api_key
        self.language = language
        self.prompt = prompt
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/embedder/ollama.py | embedchain/embedchain/config/embedder/ollama.py | from typing import Optional
from embedchain.config.embedder.base import BaseEmbedderConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class OllamaEmbedderConfig(BaseEmbedderConfig):
    """Configuration for embeddings served by a local Ollama instance."""

    def __init__(
        self,
        model: Optional[str] = None,
        base_url: Optional[str] = None,
        vector_dimension: Optional[int] = None,
    ):
        """
        :param model: name of the Ollama embedding model, defaults to None
        :param base_url: Ollama server URL; defaults to http://localhost:11434
        :param vector_dimension: dimensionality of the produced embeddings
        """
        # Fall back to the standard local Ollama endpoint when no URL is given.
        self.base_url = base_url if base_url else "http://localhost:11434"
        super().__init__(model=model, vector_dimension=vector_dimension)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/embedder/aws_bedrock.py | embedchain/embedchain/config/embedder/aws_bedrock.py | from typing import Any, Dict, Optional
from embedchain.config.embedder.base import BaseEmbedderConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class AWSBedrockEmbedderConfig(BaseEmbedderConfig):
    """Configuration for embeddings generated through AWS Bedrock."""

    def __init__(
        self,
        model: Optional[str] = None,
        deployment_name: Optional[str] = None,
        vector_dimension: Optional[int] = None,
        task_type: Optional[str] = None,
        title: Optional[str] = None,
        model_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        :param model: Bedrock embedding model identifier, defaults to None
        :param deployment_name: deployment name for the embedding model, defaults to None
        :param vector_dimension: dimensionality of the embeddings, defaults to None
        :param task_type: embedding task type, defaults to "retrieval_document"
        :param title: document title forwarded to the embedder, defaults to
            "Embeddings for Embedchain"
        :param model_kwargs: extra keyword arguments for the model, defaults to {}
        """
        super().__init__(model=model, deployment_name=deployment_name, vector_dimension=vector_dimension)
        self.task_type = task_type if task_type else "retrieval_document"
        self.title = title if title else "Embeddings for Embedchain"
        self.model_kwargs = model_kwargs if model_kwargs else {}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/embedder/google.py | embedchain/embedchain/config/embedder/google.py | from typing import Optional
from embedchain.config.embedder.base import BaseEmbedderConfig
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class GoogleAIEmbedderConfig(BaseEmbedderConfig):
    """Configuration for Google AI embedding models."""

    def __init__(
        self,
        model: Optional[str] = None,
        deployment_name: Optional[str] = None,
        vector_dimension: Optional[int] = None,
        task_type: Optional[str] = None,
        title: Optional[str] = None,
    ):
        """
        :param model: name of the Google embedding model, defaults to None
        :param deployment_name: deployment name for the model, defaults to None
        :param vector_dimension: dimensionality of the embeddings, defaults to None
        :param task_type: embedding task type, defaults to "retrieval_document"
        :param title: document title forwarded to the embedder, defaults to
            "Embeddings for Embedchain"
        """
        super().__init__(model=model, deployment_name=deployment_name, vector_dimension=vector_dimension)
        self.task_type = task_type if task_type else "retrieval_document"
        self.title = title if title else "Embeddings for Embedchain"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/embedder/__init__.py | embedchain/embedchain/config/embedder/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/config/embedder/base.py | embedchain/embedchain/config/embedder/base.py | from typing import Any, Dict, Optional, Union
import httpx
from embedchain.helpers.json_serializable import register_deserializable
@register_deserializable
class BaseEmbedderConfig:
    """Shared configuration for all embedder backends."""

    def __init__(
        self,
        model: Optional[str] = None,
        deployment_name: Optional[str] = None,
        vector_dimension: Optional[int] = None,
        endpoint: Optional[str] = None,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        model_kwargs: Optional[Dict[str, Any]] = None,
        http_client_proxies: Optional[Union[Dict, str]] = None,
        http_async_client_proxies: Optional[Union[Dict, str]] = None,
    ):
        """
        Initialize a new instance of an embedder config class.

        :param model: model name of the llm embedding model (not applicable to all providers), defaults to None
        :type model: Optional[str], optional
        :param deployment_name: deployment name for llm embedding model, defaults to None
        :type deployment_name: Optional[str], optional
        :param vector_dimension: vector dimension of the embedding model, defaults to None
        :type vector_dimension: Optional[int], optional
        :param endpoint: endpoint for the embedding model, defaults to None
        :type endpoint: Optional[str], optional
        :param api_key: huggingface api key, defaults to None
        :type api_key: Optional[str], optional
        :param api_base: huggingface api base, defaults to None
        :type api_base: Optional[str], optional
        :param model_kwargs: key-value arguments for the embedding model, defaults a dict inside init.
        :type model_kwargs: Optional[Dict[str, Any]], defaults a dict inside init.
        :param http_client_proxies: The proxy server settings used to create self.http_client, defaults to None
        :type http_client_proxies: Optional[Dict | str], optional
        :param http_async_client_proxies: The proxy server settings for async calls used to create
        self.http_async_client, defaults to None
        :type http_async_client_proxies: Optional[Dict | str], optional
        """
        self.model = model
        self.deployment_name = deployment_name
        self.vector_dimension = vector_dimension
        self.endpoint = endpoint
        self.api_key = api_key
        self.api_base = api_base
        self.model_kwargs = model_kwargs or {}
        # NOTE(review): httpx deprecated the `proxies` argument in 0.26 and
        # removed it in 0.28 (use `proxy`/`mounts` instead) — confirm the
        # pinned httpx version before upgrading.
        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
        self.http_async_client = (
            httpx.AsyncClient(proxies=http_async_client_proxies) if http_async_client_proxies else None
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/deployment/gradio.app/app.py | embedchain/embedchain/deployment/gradio.app/app.py | import os
import gradio as gr
from embedchain import App
# NOTE(review): placeholder API key — replace "sk-xxx" with a real key or set
# OPENAI_API_KEY in the environment before running.
os.environ["OPENAI_API_KEY"] = "sk-xxx"
# Single EmbedChain app instance shared across all chat turns.
app = App()
def query(message, history):
    """Gradio ChatInterface callback: answer `message` (history is unused)."""
    return app.chat(message)
demo = gr.ChatInterface(query)
demo.launch()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/deployment/modal.com/app.py | embedchain/embedchain/deployment/modal.com/app.py | from dotenv import load_dotenv
from fastapi import Body, FastAPI, responses
from modal import Image, Secret, Stub, asgi_app
from embedchain import App
load_dotenv(".env")
# Container image for the Modal deployment: slim Debian plus the pinned
# Python dependencies required by the embedchain data loaders.
image = Image.debian_slim().pip_install(
    "embedchain",
    # Fixed typo: was "lanchain_community", which is not a real PyPI package
    # and made the image build fail.
    "langchain_community==0.2.6",
    "youtube-transcript-api==0.6.1",
    "pytube==15.0.0",
    "beautifulsoup4==4.12.3",
    "slack-sdk==3.21.3",
    "huggingface_hub==0.23.0",
    "gitpython==3.1.38",
    "yt_dlp==2023.11.14",
    "PyGithub==1.59.1",
    "feedparser==6.0.10",
    "newspaper3k==0.2.8",
    "listparser==0.19",
)
# Modal stub wiring: app name, the image above and secrets loaded from .env.
stub = Stub(
    name="embedchain-app",
    image=image,
    secrets=[Secret.from_dotenv(".env")],
)
web_app = FastAPI()
# Single shared EmbedChain app instance used by all routes below.
embedchain_app = App(name="embedchain-modal-app")
@web_app.post("/add")
async def add(
    source: str = Body(..., description="Source to be added"),
    data_type: str | None = Body(None, description="Type of the data source"),
):
    """
    Adds a new source to the EmbedChain app.

    Expects a JSON with a "source" and an optional "data_type" key.
    """
    # Guard clause: nothing to ingest without a source.
    if not source:
        return {"message": "No source provided."}
    if data_type:
        embedchain_app.add(source, data_type)
    else:
        embedchain_app.add(source)
    return {"message": f"Source '{source}' added successfully."}
@web_app.post("/query")
async def query(question: str = Body(..., description="Question to be answered")):
    """
    Handles a query to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    if question:
        return {"answer": embedchain_app.query(question)}
    return {"message": "No question provided."}
@web_app.get("/chat")
async def chat(question: str = Body(..., description="Question to be answered")):
    """
    Handles a chat request to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    # NOTE(review): a GET route with a request body is unusual — many HTTP
    # clients drop GET bodies; consider POST (as /query uses). Kept as-is so
    # existing callers are not broken.
    if not question:
        return {"message": "No question provided."}
    return {"response": embedchain_app.chat(question)}
@web_app.get("/")
async def root():
    """Redirect the bare root URL to the interactive API docs."""
    return responses.RedirectResponse(url="/docs")
@stub.function(image=image)
@asgi_app()
def fastapi_app():
    """Modal entrypoint that serves the FastAPI application as an ASGI app."""
    return web_app
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/deployment/streamlit.io/app.py | embedchain/embedchain/deployment/streamlit.io/app.py | import streamlit as st
from embedchain import App
@st.cache_resource
def embedchain_bot():
    """Build the EmbedChain app once and reuse it across Streamlit reruns."""
    return App()
st.title("💬 Chatbot")
st.caption("🚀 An Embedchain app powered by OpenAI!")
# Seed the conversation with a greeting on the first run of the session.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a chatbot. I can answer questions and learn new things!\n
        Ask me anything and if you want me to learn something do `/add <source>`.\n
        I can learn mostly everything. :)
        """,
        }
    ]
# Re-render the whole conversation history on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
    app = embedchain_bot()
    # `/add <source>` ingests a new source instead of answering a question.
    if prompt.startswith("/add"):
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
        prompt = prompt.replace("/add", "").strip()
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Adding to knowledge base...")
            app.add(prompt)
            message_placeholder.markdown(f"Added {prompt} to knowledge base!")
            st.session_state.messages.append({"role": "assistant", "content": f"Added {prompt} to knowledge base!"})
            # Stop this rerun so the question-answering path below is skipped.
            st.stop()
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""
        # NOTE(review): this iterates whatever app.chat() returns — if it is a
        # plain string this loops per character; presumably a streaming
        # generator is expected. Confirm the app's streaming configuration.
        for response in app.chat(prompt):
            msg_placeholder.empty()
            full_response += response
            msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/deployment/render.com/app.py | embedchain/embedchain/deployment/render.com/app.py | from fastapi import FastAPI, responses
from pydantic import BaseModel
from embedchain import App
app = FastAPI(title="Embedchain FastAPI App")
embedchain_app = App()
class SourceModel(BaseModel):
    """Request body for /add: the data source to ingest."""

    source: str
class QuestionModel(BaseModel):
    """Request body for /query and /chat: the question to answer."""

    question: str
@app.post("/add")
async def add_source(source_model: SourceModel):
    """
    Adds a new source to the EmbedChain app.

    Expects a JSON with a "source" key.
    """
    embedchain_app.add(source_model.source)
    return {"message": f"Source '{source_model.source}' added successfully."}
@app.post("/query")
async def handle_query(question_model: QuestionModel):
    """
    Handles a query to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    return {"answer": embedchain_app.query(question_model.question)}
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
    """
    Handles a chat request to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    return {"response": embedchain_app.chat(question_model.question)}
@app.get("/")
async def root():
    """Redirect the bare root URL to the interactive API docs."""
    return responses.RedirectResponse(url="/docs")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/deployment/fly.io/app.py | embedchain/embedchain/deployment/fly.io/app.py | from dotenv import load_dotenv
from fastapi import FastAPI, responses
from pydantic import BaseModel
from embedchain import App
load_dotenv(".env")
app = FastAPI(title="Embedchain FastAPI App")
embedchain_app = App()
class SourceModel(BaseModel):
    """Request body for /add: the data source to ingest."""

    source: str
class QuestionModel(BaseModel):
    """Request body for /query and /chat: the question to answer."""

    question: str
@app.post("/add")
async def add_source(source_model: SourceModel):
    """
    Adds a new source to the EmbedChain app.

    Expects a JSON with a "source" key.
    """
    embedchain_app.add(source_model.source)
    return {"message": f"Source '{source_model.source}' added successfully."}
@app.post("/query")
async def handle_query(question_model: QuestionModel):
    """
    Handles a query to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    return {"answer": embedchain_app.query(question_model.question)}
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
    """
    Handles a chat request to the EmbedChain app.

    Expects a JSON with a "question" key.
    """
    return {"response": embedchain_app.chat(question_model.question)}
@app.get("/")
async def root():
    """Redirect the bare root URL to the interactive API docs."""
    return responses.RedirectResponse(url="/docs")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/__init__.py | embedchain/embedchain/evaluation/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/base.py | embedchain/embedchain/evaluation/base.py | from abc import ABC, abstractmethod
from embedchain.utils.evaluation import EvalData
class BaseMetric(ABC):
    """Base class for a metric.

    This class provides a common interface for all metrics.
    """

    def __init__(self, name: str = "base_metric"):
        """
        Initialize the BaseMetric.

        :param name: identifier of the metric, defaults to "base_metric"
        :type name: str, optional
        """
        self.name = name

    @abstractmethod
    def evaluate(self, dataset: list[EvalData]):
        """
        Abstract method to evaluate the dataset.

        This method should be implemented by subclasses to perform the actual
        evaluation on the dataset.

        :param dataset: dataset to evaluate
        :type dataset: list[EvalData]
        """
        raise NotImplementedError()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/metrics/answer_relevancy.py | embedchain/embedchain/evaluation/metrics/answer_relevancy.py | import concurrent.futures
import logging
import os
from string import Template
from typing import Optional
import numpy as np
from openai import OpenAI
from tqdm import tqdm
from embedchain.config.evaluation.base import AnswerRelevanceConfig
from embedchain.evaluation.base import BaseMetric
from embedchain.utils.evaluation import EvalData, EvalMetric
logger = logging.getLogger(__name__)
class AnswerRelevance(BaseMetric):
    """
    Metric for evaluating the relevance of answers.

    For each data point the configured LLM generates questions from the answer;
    the score is the mean cosine similarity between the embedding of the
    original question and the embeddings of the generated questions.
    """

    def __init__(self, config: Optional[AnswerRelevanceConfig] = None):
        """
        :param config: metric configuration; a fresh default config is created
            when None is given.
        :raises ValueError: when no OpenAI API key is available.
        """
        super().__init__(name=EvalMetric.ANSWER_RELEVANCY.value)
        # A `None` default (instead of a shared `AnswerRelevanceConfig()`
        # instance evaluated once at class-definition time) avoids the
        # mutable-default-argument pitfall and matches the Groundedness metric.
        self.config = config or AnswerRelevanceConfig()
        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("API key not found. Set 'OPENAI_API_KEY' or pass it in the config.")
        self.client = OpenAI(api_key=api_key)

    def _generate_prompt(self, data: EvalData) -> str:
        """Fill the configured prompt template with the answer under evaluation."""
        return Template(self.config.prompt).substitute(
            num_gen_questions=self.config.num_gen_questions, answer=data.answer
        )

    def _generate_questions(self, prompt: str) -> list[str]:
        """Ask the LLM for questions; the response has one question per line."""
        response = self.client.chat.completions.create(
            model=self.config.model,
            messages=[{"role": "user", "content": prompt}],
        )
        return response.choices[0].message.content.strip().split("\n")

    def _generate_embedding(self, question: str) -> np.ndarray:
        """Embed a single question with the configured embedding model."""
        response = self.client.embeddings.create(
            input=question,
            model=self.config.embedder,
        )
        return np.array(response.data[0].embedding)

    def _compute_similarity(self, original: np.ndarray, generated: np.ndarray) -> float:
        """Cosine similarity of each row of `generated` against `original`."""
        original = original.reshape(1, -1)
        norm = np.linalg.norm(original) * np.linalg.norm(generated, axis=1)
        return np.dot(generated, original.T).flatten() / norm

    def _compute_score(self, data: EvalData) -> float:
        """Score one data point: mean similarity of the generated questions."""
        prompt = self._generate_prompt(data)
        generated_questions = self._generate_questions(prompt)
        original_embedding = self._generate_embedding(data.question)
        generated_embeddings = np.array([self._generate_embedding(q) for q in generated_questions])
        similarities = self._compute_similarity(original_embedding, generated_embeddings)
        return np.mean(similarities)

    def evaluate(self, dataset: list[EvalData]) -> float:
        """
        Evaluates the dataset and returns the average answer relevance score.

        Data points that raise are logged and excluded from the mean; an empty
        result set yields 0.0.
        """
        results = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_data = {executor.submit(self._compute_score, data): data for data in dataset}
            for future in tqdm(
                concurrent.futures.as_completed(future_to_data), total=len(dataset), desc="Evaluating Answer Relevancy"
            ):
                data = future_to_data[future]
                try:
                    results.append(future.result())
                except Exception as e:
                    logger.error(f"Error evaluating answer relevancy for {data}: {e}")
        return np.mean(results) if results else 0.0
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/metrics/groundedness.py | embedchain/embedchain/evaluation/metrics/groundedness.py | import concurrent.futures
import logging
import os
from string import Template
from typing import Optional
import numpy as np
from openai import OpenAI
from tqdm import tqdm
from embedchain.config.evaluation.base import GroundednessConfig
from embedchain.evaluation.base import BaseMetric
from embedchain.utils.evaluation import EvalData, EvalMetric
logger = logging.getLogger(__name__)
class Groundedness(BaseMetric):
    """
    Metric for groundedness of answer from the given contexts.

    Pipeline: (1) extract claim statements from the answer with an LLM,
    (2) ask the LLM for a per-claim verdict against the retrieved contexts,
    (3) score = supported claims / total claims.
    """

    def __init__(self, config: Optional[GroundednessConfig] = None):
        """
        :param config: metric configuration; a default config is created when None
        :raises ValueError: when no OpenAI API key is available
        """
        super().__init__(name=EvalMetric.GROUNDEDNESS.value)
        self.config = config or GroundednessConfig()
        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("Please set the OPENAI_API_KEY environment variable or pass the `api_key` in config.")
        self.client = OpenAI(api_key=api_key)

    def _generate_answer_claim_prompt(self, data: EvalData) -> str:
        """
        Generate the prompt for the given data.
        """
        prompt = Template(self.config.answer_claims_prompt).substitute(question=data.question, answer=data.answer)
        return prompt

    def _get_claim_statements(self, prompt: str) -> np.ndarray:
        """
        Get claim statements from the answer.
        """
        response = self.client.chat.completions.create(
            model=self.config.model,
            messages=[{"role": "user", "content": f"{prompt}"}],
        )
        result = response.choices[0].message.content.strip()
        # One claim per non-empty line of the LLM response.
        claim_statements = np.array([statement for statement in result.split("\n") if statement])
        return claim_statements

    def _generate_claim_inference_prompt(self, data: EvalData, claim_statements: list[str]) -> str:
        """
        Generate the claim inference prompt for the given data and claim statements.
        """
        prompt = Template(self.config.claims_inference_prompt).substitute(
            context="\n".join(data.contexts), claim_statements="\n".join(claim_statements)
        )
        return prompt

    def _get_claim_verdict_scores(self, prompt: str) -> np.ndarray:
        """
        Get verdicts for claim statements.
        """
        response = self.client.chat.completions.create(
            model=self.config.model,
            messages=[{"role": "user", "content": f"{prompt}"}],
        )
        result = response.choices[0].message.content.strip()
        claim_verdicts = result.split("\n")
        # "1" = supported, "0" = unsupported, "-1" = undeterminable (NaN).
        # NOTE(review): any other token from the LLM raises KeyError here, and a
        # single NaN makes the np.sum() in _compute_score NaN — presumably
        # np.nansum was intended; confirm before changing.
        verdict_score_map = {"1": 1, "0": 0, "-1": np.nan}
        verdict_scores = np.array([verdict_score_map[verdict] for verdict in claim_verdicts])
        return verdict_scores

    def _compute_score(self, data: EvalData) -> float:
        """
        Compute the groundedness score for a single data point.
        """
        answer_claims_prompt = self._generate_answer_claim_prompt(data)
        claim_statements = self._get_claim_statements(answer_claims_prompt)
        claim_inference_prompt = self._generate_claim_inference_prompt(data, claim_statements)
        verdict_scores = self._get_claim_verdict_scores(claim_inference_prompt)
        # Fraction of claims supported by the context.
        return np.sum(verdict_scores) / claim_statements.size

    def evaluate(self, dataset: list[EvalData]):
        """
        Evaluate the dataset and returns the average groundedness score.

        Failed data points are logged and excluded; an empty result set yields 0.0.
        """
        results = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_data = {executor.submit(self._compute_score, data): data for data in dataset}
            for future in tqdm(
                concurrent.futures.as_completed(future_to_data),
                total=len(future_to_data),
                desc="Evaluating Groundedness",
            ):
                data = future_to_data[future]
                try:
                    score = future.result()
                    results.append(score)
                except Exception as e:
                    logger.error(f"Error while evaluating groundedness for data point {data}: {e}")
        return np.mean(results) if results else 0.0
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/metrics/__init__.py | embedchain/embedchain/evaluation/metrics/__init__.py | from .answer_relevancy import AnswerRelevance # noqa: F401
from .context_relevancy import ContextRelevance # noqa: F401
from .groundedness import Groundedness # noqa: F401
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/evaluation/metrics/context_relevancy.py | embedchain/embedchain/evaluation/metrics/context_relevancy.py | import concurrent.futures
import os
from string import Template
from typing import Optional
import numpy as np
import pysbd
from openai import OpenAI
from tqdm import tqdm
from embedchain.config.evaluation.base import ContextRelevanceConfig
from embedchain.evaluation.base import BaseMetric
from embedchain.utils.evaluation import EvalData, EvalMetric
class ContextRelevance(BaseMetric):
    """
    Metric for evaluating the relevance of context in a dataset.

    For each data point the configured LLM extracts the sentences of the
    retrieved context that are useful for answering the question; the
    score is the ratio of useful to total context sentences.
    """

    def __init__(self, config: Optional[ContextRelevanceConfig] = None):
        """
        :param config: metric configuration; a fresh default
            ``ContextRelevanceConfig`` is created when omitted.
        :raises ValueError: if no OpenAI API key is configured.
        """
        super().__init__(name=EvalMetric.CONTEXT_RELEVANCY.value)
        # Build a fresh config per instance: a default argument of
        # ContextRelevanceConfig() would be evaluated once at class
        # definition time and shared by every instance (mutable-default
        # pitfall).
        self.config = config or ContextRelevanceConfig()
        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("API key not found. Set 'OPENAI_API_KEY' or pass it in the config.")
        self.client = OpenAI(api_key=api_key)
        self._sbd = pysbd.Segmenter(language=self.config.language, clean=False)

    def _sentence_segmenter(self, text: str) -> list[str]:
        """Segment the given text into sentences using pysbd."""
        return self._sbd.segment(text)

    def _compute_score(self, data: EvalData) -> float:
        """
        Compute the context relevance score for a single data item.

        :param data: evaluation record with ``contexts`` and ``question``.
        :return: ratio of useful context sentences to total context
            sentences; 0.0 when the original context has no sentences.
        """
        original_context = "\n".join(data.contexts)
        prompt = Template(self.config.prompt).substitute(context=original_context, question=data.question)
        response = self.client.chat.completions.create(
            model=self.config.model, messages=[{"role": "user", "content": prompt}]
        )
        useful_context = response.choices[0].message.content.strip()
        useful_context_sentences = self._sentence_segmenter(useful_context)
        original_context_sentences = self._sentence_segmenter(original_context)
        # Guard against division by zero on an empty context.
        if not original_context_sentences:
            return 0.0
        return len(useful_context_sentences) / len(original_context_sentences)

    def evaluate(self, dataset: list[EvalData]) -> float:
        """
        Evaluate the dataset and return the average context relevance score.

        Scores are computed concurrently; failed data points are reported
        and excluded from the average.
        """
        scores = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [executor.submit(self._compute_score, data) for data in dataset]
            for future in tqdm(
                concurrent.futures.as_completed(futures), total=len(dataset), desc="Evaluating Context Relevancy"
            ):
                try:
                    scores.append(future.result())
                except Exception as e:
                    # NOTE(review): sibling metrics log via ``logging``;
                    # consider replacing print for consistency.
                    print(f"Error during evaluation: {e}")
        return np.mean(scores) if scores else 0.0
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/clarifai.py | embedchain/embedchain/embedder/clarifai.py | import os
from typing import Optional, Union
from chromadb import EmbeddingFunction, Embeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
class ClarifaiEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by a Clarifai embedding model."""

    def __init__(self, config: BaseEmbedderConfig) -> None:
        """
        :param config: embedder config; ``config.model`` is the Clarifai
            model URL and ``config.api_key`` (or the ``CLARIFAI_PAT`` env
            var) is the personal access token.
        :raises ModuleNotFoundError: if the clarifai package is missing.
        """
        super().__init__()
        try:
            from clarifai.client.input import Inputs
            from clarifai.client.model import Model
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for ClarifaiEmbeddingFunction are not installed."
                'Please install with `pip install --upgrade "embedchain[clarifai]"`'
            ) from None
        self.config = config
        self.api_key = config.api_key or os.getenv("CLARIFAI_PAT")
        self.model = config.model
        self.model_obj = Model(url=self.model, pat=self.api_key)
        self.input_obj = Inputs(pat=self.api_key)

    def __call__(self, input: Union[str, list[str]]) -> Embeddings:
        """
        Embed one string or a list of strings, in batches of 32.

        Best-effort: on a predict failure the error is printed and the
        embeddings collected so far are returned (possibly incomplete).
        """
        if isinstance(input, str):
            input = [input]
        batch_size = 32
        embeddings = []
        try:
            for start in range(0, len(input), batch_size):
                batch = input[start : start + batch_size]
                # `idx` avoids shadowing the builtin `id`; ids only need
                # to be unique within a single predict call.
                input_batch = [
                    self.input_obj.get_text_input(input_id=str(idx), raw_text=text)
                    for idx, text in enumerate(batch)
                ]
                response = self.model_obj.predict(input_batch)
                embeddings.extend([list(output.data.embeddings[0].vector) for output in response.outputs])
        except Exception as e:
            print(f"Predict failed, exception: {e}")
        return embeddings
class ClarifaiEmbedder(BaseEmbedder):
    """Embedder that wires a ClarifaiEmbeddingFunction into BaseEmbedder."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        self.set_embedding_fn(embedding_fn=ClarifaiEmbeddingFunction(config=self.config))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/nvidia.py | embedchain/embedchain/embedder/nvidia.py | import logging
import os
from typing import Optional
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
logger = logging.getLogger(__name__)
class NvidiaEmbedder(BaseEmbedder):
    """Embedder backed by NVIDIA AI endpoint embeddings."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        """
        :param config: embedder config; model defaults to "nvolveqa_40k".
        :raises ValueError: if NVIDIA_API_KEY is not set in the environment.
        """
        # Fail fast before any setup if the API key is missing.
        if "NVIDIA_API_KEY" not in os.environ:
            raise ValueError("NVIDIA_API_KEY environment variable must be set")

        super().__init__(config=config)

        model_name = self.config.model or "nvolveqa_40k"
        logger.info(f"Using NVIDIA embedding model: {model_name}")
        self.set_embedding_fn(
            embedding_fn=BaseEmbedder._langchain_default_concept(NVIDIAEmbeddings(model=model_name))
        )
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.NVIDIA_AI.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/ollama.py | embedchain/embedchain/embedder/ollama.py | import logging
from typing import Optional
try:
from ollama import Client
except ImportError:
raise ImportError("Ollama Embedder requires extra dependencies. Install with `pip install ollama`") from None
from langchain_community.embeddings import OllamaEmbeddings
from embedchain.config import OllamaEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
logger = logging.getLogger(__name__)
class OllamaEmbedder(BaseEmbedder):
    """Embedder backed by a local Ollama server; pulls the model if absent."""

    def __init__(self, config: Optional[OllamaEmbedderConfig] = None):
        """
        :param config: Ollama embedder config (model name and base_url).
        """
        super().__init__(config=config)
        # Use self.config (populated by the base class) rather than the raw
        # `config` argument, which is None when no config is passed and
        # would raise AttributeError on `.base_url`.
        client = Client(host=self.config.base_url)
        local_models = client.list()["models"]
        # Pull the model from the Ollama registry if it is not present locally.
        if not any(model.get("name") == self.config.model for model in local_models):
            logger.info(f"Pulling {self.config.model} from Ollama!")
            client.pull(self.config.model)
        embeddings = OllamaEmbeddings(model=self.config.model, base_url=self.config.base_url)
        embedding_fn = BaseEmbedder._langchain_default_concept(embeddings)
        self.set_embedding_fn(embedding_fn=embedding_fn)
        vector_dimension = self.config.vector_dimension or VectorDimensions.OLLAMA.value
        self.set_vector_dimension(vector_dimension=vector_dimension)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/aws_bedrock.py | embedchain/embedchain/embedder/aws_bedrock.py | from typing import Optional
try:
from langchain_aws import BedrockEmbeddings
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for AWSBedrock are not installed." "Please install with `pip install langchain_aws`"
) from None
from embedchain.config.embedder.aws_bedrock import AWSBedrockEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class AWSBedrockEmbedder(BaseEmbedder):
    """Embedder backed by AWS Bedrock embedding models (Titan v2 by default)."""

    def __init__(self, config: Optional[AWSBedrockEmbedderConfig] = None):
        """
        :param config: Bedrock embedder config; model defaults to
            "amazon.titan-embed-text-v2:0".
        :raises ValueError: if the model is not a known Titan model and no
            vector dimension is configured.
        """
        super().__init__(config)
        if self.config.model is None or self.config.model == "amazon.titan-embed-text-v2:0":
            self.config.model = "amazon.titan-embed-text-v2:0"  # Default model if not specified
            vector_dimension = self.config.vector_dimension or VectorDimensions.AMAZON_TITAN_V2.value
        elif self.config.model == "amazon.titan-embed-text-v1":
            # An explicitly configured dimension wins, mirroring the v2 branch.
            vector_dimension = self.config.vector_dimension or VectorDimensions.AMAZON_TITAN_V1.value
        else:
            vector_dimension = self.config.vector_dimension
            if vector_dimension is None:
                # Without this, set_vector_dimension would fail later with an
                # opaque TypeError; raise a clear, actionable error instead.
                raise ValueError(
                    f"vector_dimension must be configured for model {self.config.model!r}"
                )
        embeddings = BedrockEmbeddings(model_id=self.config.model, model_kwargs=self.config.model_kwargs)
        embedding_fn = BaseEmbedder._langchain_default_concept(embeddings)
        self.set_embedding_fn(embedding_fn=embedding_fn)
        self.set_vector_dimension(vector_dimension=vector_dimension)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/google.py | embedchain/embedchain/embedder/google.py | from typing import Optional, Union
import google.generativeai as genai
from chromadb import EmbeddingFunction, Embeddings
from embedchain.config.embedder.google import GoogleAIEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class GoogleAIEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by Google Generative AI embeddings."""

    def __init__(self, config: Optional[GoogleAIEmbedderConfig] = None) -> None:
        """
        :param config: Google AI embedder config; defaults are used when omitted.
        """
        super().__init__()
        self.config = config or GoogleAIEmbedderConfig()

    def __call__(self, input: Union[list[str], str]) -> Embeddings:
        """
        Embed a string or a list of strings; always returns a list of vectors.
        """
        model = self.config.model
        title = self.config.title
        task_type = self.config.task_type
        # Normalize to a list so genai always returns a list of embeddings.
        if isinstance(input, str):
            input_ = [input]
        else:
            input_ = input
        data = genai.embed_content(model=model, content=input_, task_type=task_type, title=title)
        # `input_` is always a list here, so data["embedding"] is already a
        # list of vectors; the previous `isinstance(input_, str)` re-wrap
        # branch was dead code and has been removed.
        return data["embedding"]
class GoogleAIEmbedder(BaseEmbedder):
    """Embedder that plugs GoogleAIEmbeddingFunction into BaseEmbedder."""

    def __init__(self, config: Optional[GoogleAIEmbedderConfig] = None):
        super().__init__(config)
        # The embedding function falls back to its own default config when
        # `config` is None.
        self.set_embedding_fn(embedding_fn=GoogleAIEmbeddingFunction(config=config))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.GOOGLE_AI.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/gpt4all.py | embedchain/embedchain/embedder/gpt4all.py | from typing import Optional
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class GPT4AllEmbedder(BaseEmbedder):
    """Embedder backed by a local GPT4All embedding model."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config=config)

        # Imported lazily so the dependency is only required when this
        # embedder is actually used.
        from langchain_community.embeddings import (
            GPT4AllEmbeddings as LangchainGPT4AllEmbeddings,
        )

        chosen_model = self.config.model or "all-MiniLM-L6-v2-f16.gguf"
        # NOTE(review): 'True' is a string, not a bool -- it is truthy so it
        # works, but confirm whether GPT4All expects a real boolean here.
        gpt4all_kwargs = {'allow_download': 'True'}
        embedder = LangchainGPT4AllEmbeddings(model_name=chosen_model, gpt4all_kwargs=gpt4all_kwargs)
        self.set_embedding_fn(embedding_fn=BaseEmbedder._langchain_default_concept(embedder))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.GPT4ALL.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/huggingface.py | embedchain/embedchain/embedder/huggingface.py | import os
from typing import Optional
from langchain_community.embeddings import HuggingFaceEmbeddings
try:
from langchain_huggingface import HuggingFaceEndpointEmbeddings
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The required dependencies for HuggingFaceHub are not installed."
"Please install with `pip install langchain_huggingface`"
) from None
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class HuggingFaceEmbedder(BaseEmbedder):
    """Embedder backed by local HuggingFace models or an inference endpoint."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        """
        :param config: embedder config; when ``config.endpoint`` is set, the
            HuggingFace inference endpoint is used, otherwise a local model.
        :raises ValueError: if an endpoint is configured but neither an API
            key nor the HUGGINGFACE_ACCESS_TOKEN env var is present.
        """
        super().__init__(config=config)

        if self.config.endpoint:
            if not self.config.api_key and "HUGGINGFACE_ACCESS_TOKEN" not in os.environ:
                raise ValueError(
                    "Please set the HUGGINGFACE_ACCESS_TOKEN environment variable or pass API Key in the config."
                )
            embeddings = HuggingFaceEndpointEmbeddings(
                model=self.config.endpoint,
                huggingfacehub_api_token=self.config.api_key or os.getenv("HUGGINGFACE_ACCESS_TOKEN"),
            )
        else:
            embeddings = HuggingFaceEmbeddings(model_name=self.config.model, model_kwargs=self.config.model_kwargs)

        self.set_embedding_fn(embedding_fn=BaseEmbedder._langchain_default_concept(embeddings))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.HUGGING_FACE.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/cohere.py | embedchain/embedchain/embedder/cohere.py | from typing import Optional
from langchain_cohere.embeddings import CohereEmbeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class CohereEmbedder(BaseEmbedder):
    """Embedder backed by Cohere embedding models."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config=config)
        cohere_embeddings = CohereEmbeddings(model=self.config.model)
        self.set_embedding_fn(embedding_fn=BaseEmbedder._langchain_default_concept(cohere_embeddings))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.COHERE.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/__init__.py | embedchain/embedchain/embedder/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/vertexai.py | embedchain/embedchain/embedder/vertexai.py | from typing import Optional
from langchain_google_vertexai import VertexAIEmbeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class VertexAIEmbedder(BaseEmbedder):
    """Embedder backed by Google Vertex AI embedding models."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        """
        :param config: embedder config; ``config.model`` selects the
            Vertex AI embedding model.
        """
        super().__init__(config=config)
        # Use self.config (populated by the base class) rather than the raw
        # `config` argument, which is None when no config is passed and
        # would raise AttributeError on `.model`.
        embeddings = VertexAIEmbeddings(model_name=self.config.model)
        embedding_fn = BaseEmbedder._langchain_default_concept(embeddings)
        self.set_embedding_fn(embedding_fn=embedding_fn)
        vector_dimension = self.config.vector_dimension or VectorDimensions.VERTEX_AI.value
        self.set_vector_dimension(vector_dimension=vector_dimension)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/azure_openai.py | embedchain/embedchain/embedder/azure_openai.py | from typing import Optional
from langchain_openai import AzureOpenAIEmbeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class AzureOpenAIEmbedder(BaseEmbedder):
    """Embedder backed by an Azure OpenAI embedding deployment."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config=config)
        if self.config.model is None:
            self.config.model = "text-embedding-ada-002"
        # NOTE(review): only the deployment name is passed to the client;
        # self.config.model is recorded but not forwarded -- confirm whether
        # that is intentional (the Azure deployment implies the model).
        embeddings = AzureOpenAIEmbeddings(
            deployment=self.config.deployment_name,
            http_client=self.config.http_client,
            http_async_client=self.config.http_async_client,
        )
        self.set_embedding_fn(embedding_fn=BaseEmbedder._langchain_default_concept(embeddings))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.OPENAI.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/base.py | embedchain/embedchain/embedder/base.py | from collections.abc import Callable
from typing import Any, Optional
from embedchain.config.embedder.base import BaseEmbedderConfig
try:
from chromadb.api.types import Embeddable, EmbeddingFunction, Embeddings
except RuntimeError:
from embedchain.utils.misc import use_pysqlite3
use_pysqlite3()
from chromadb.api.types import Embeddable, EmbeddingFunction, Embeddings
class EmbeddingFunc(EmbeddingFunction):
    """Adapter exposing a plain callable as a Chroma EmbeddingFunction."""

    def __init__(self, embedding_fn: Callable[[list[str]], list[str]]):
        # The wrapped callable maps a list of documents to their embeddings.
        self.embedding_fn = embedding_fn

    def __call__(self, input: Embeddable) -> Embeddings:
        return self.embedding_fn(input)
class BaseEmbedder:
    """
    Class that manages everything regarding embeddings. Including embedding function, loaders and chunkers.

    Embedding functions and vector dimensions are set based on the child class you choose.
    To manually overwrite you can use this classes `set_...` methods.
    """

    def __init__(self, config: "Optional[BaseEmbedderConfig]" = None):
        """
        Initialize the embedder class.

        :param config: embedder configuration option class, defaults to None
        :type config: Optional[BaseEmbedderConfig], optional
        """
        if config is None:
            self.config = BaseEmbedderConfig()
        else:
            self.config = config
        # Declared here; subclasses assign it via set_vector_dimension.
        self.vector_dimension: int

    def set_embedding_fn(self, embedding_fn: Callable[[list[str]], list[str]]):
        """
        Set or overwrite the embedding function to be used by the database to store and retrieve documents.

        :param embedding_fn: Function to be used to generate embeddings.
        :type embedding_fn: Callable[[list[str]], list[str]]
        :raises ValueError: Embedding function is not callable.
        """
        # callable() is the idiomatic check; hasattr(fn, "__call__") is
        # equivalent but less clear.
        if not callable(embedding_fn):
            raise ValueError("Embedding function is not a function")
        self.embedding_fn = embedding_fn

    def set_vector_dimension(self, vector_dimension: int):
        """
        Set or overwrite the vector dimension size

        :param vector_dimension: vector dimension size
        :type vector_dimension: int
        :raises TypeError: if the dimension is not an int
        """
        if not isinstance(vector_dimension, int):
            raise TypeError("vector dimension must be int")
        self.vector_dimension = vector_dimension

    @staticmethod
    def _langchain_default_concept(embeddings: Any):
        """
        Langchains default function layout for embeddings.

        :param embeddings: Langchain embeddings
        :type embeddings: Any
        :return: embedding function
        :rtype: Callable
        """
        return EmbeddingFunc(embeddings.embed_documents)

    def to_embeddings(self, data: str, **_):
        """
        Convert data to embeddings

        :param data: data to convert to embeddings
        :type data: str
        :return: embeddings
        :rtype: list[float]
        """
        embeddings = self.embedding_fn([data])
        return embeddings[0]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/mistralai.py | embedchain/embedchain/embedder/mistralai.py | import os
from typing import Optional, Union
from chromadb import EmbeddingFunction, Embeddings
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class MistralAIEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by Mistral AI embeddings."""

    def __init__(self, config: BaseEmbedderConfig) -> None:
        """
        :param config: embedder config; the API key falls back to the
            MISTRAL_API_KEY environment variable.
        :raises ModuleNotFoundError: if langchain_mistralai is missing.
        """
        super().__init__()
        try:
            from langchain_mistralai import MistralAIEmbeddings
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "The required dependencies for MistralAI are not installed."
                'Please install with `pip install --upgrade "embedchain[mistralai]"`'
            ) from None
        self.config = config
        api_key = self.config.api_key or os.getenv("MISTRAL_API_KEY")
        self.client = MistralAIEmbeddings(mistral_api_key=api_key)
        self.client.model = self.config.model

    def __call__(self, input: Union[list[str], str]) -> Embeddings:
        """Embed a string or a list of strings via the Mistral client."""
        texts = [input] if isinstance(input, str) else input
        return self.client.embed_documents(texts)
class MistralAIEmbedder(BaseEmbedder):
    """Embedder backed by Mistral AI ("mistral-embed" by default)."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)
        if self.config.model is None:
            self.config.model = "mistral-embed"
        self.set_embedding_fn(embedding_fn=MistralAIEmbeddingFunction(config=self.config))
        self.set_vector_dimension(
            vector_dimension=self.config.vector_dimension or VectorDimensions.MISTRAL_AI.value
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/embedder/openai.py | embedchain/embedchain/embedder/openai.py | import os
import warnings
from typing import Optional
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.models import VectorDimensions
class OpenAIEmbedder(BaseEmbedder):
    """Embedder backed by the OpenAI embeddings API."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        """
        :param config: embedder config; model defaults to
            "text-embedding-ada-002".
        :raises ValueError: if neither an API key nor an organization id
            is available.
        """
        super().__init__(config=config)
        if self.config.model is None:
            self.config.model = "text-embedding-ada-002"

        # Use .get() so a missing env var yields None instead of raising
        # KeyError -- otherwise the explicit ValueError below is unreachable.
        api_key = self.config.api_key or os.environ.get("OPENAI_API_KEY")
        api_base = (
            self.config.api_base
            or os.environ.get("OPENAI_API_BASE")
            or os.getenv("OPENAI_BASE_URL")
            or "https://api.openai.com/v1"
        )
        if os.environ.get("OPENAI_API_BASE"):
            warnings.warn(
                "The environment variable 'OPENAI_API_BASE' is deprecated and will be removed in the 0.1.140. "
                "Please use 'OPENAI_BASE_URL' instead.",
                DeprecationWarning
            )
        if api_key is None and os.getenv("OPENAI_ORGANIZATION") is None:
            raise ValueError("OPENAI_API_KEY or OPENAI_ORGANIZATION environment variables not provided")  # noqa:E501
        embedding_fn = OpenAIEmbeddingFunction(
            api_key=api_key,
            api_base=api_base,
            organization_id=os.getenv("OPENAI_ORGANIZATION"),
            model_name=self.config.model,
        )
        self.set_embedding_fn(embedding_fn=embedding_fn)
        vector_dimension = self.config.vector_dimension or VectorDimensions.OPENAI.value
        self.set_vector_dimension(vector_dimension=vector_dimension)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.