repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/telemetry/posthog.py | embedchain/embedchain/telemetry/posthog.py | import json
import logging
import os
import uuid
from posthog import Posthog
import embedchain
from embedchain.constants import CONFIG_DIR, CONFIG_FILE
class AnonymousTelemetry:
    """Send anonymous usage events to PostHog.

    Tracking can be disabled either via the ``enabled`` constructor flag or
    by setting the ``EC_TELEMETRY`` environment variable to anything other
    than "1"/"true"/"yes".
    """

    def __init__(self, host="https://app.posthog.com", enabled=True):
        # Write-only public project key; safe to ship in source.
        self.project_api_key = "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"
        self.host = host
        self.posthog = Posthog(project_api_key=self.project_api_key, host=self.host)
        self.user_id = self._get_user_id()
        self.enabled = enabled
        # Check if telemetry tracking is disabled via environment variable;
        # the env opt-out takes precedence over the constructor flag.
        if "EC_TELEMETRY" in os.environ and os.environ["EC_TELEMETRY"].lower() not in [
            "1",
            "true",
            "yes",
        ]:
            self.enabled = False
        if not self.enabled:
            self.posthog.disabled = True

        # Silence posthog logging
        posthog_logger = logging.getLogger("posthog")
        posthog_logger.disabled = True

    @staticmethod
    def _get_user_id():
        """Return a stable anonymous user id, creating and persisting one on first use."""
        os.makedirs(CONFIG_DIR, exist_ok=True)
        if os.path.exists(CONFIG_FILE):
            with open(CONFIG_FILE, "r") as f:
                data = json.load(f)
                if "user_id" in data:
                    return data["user_id"]
        user_id = str(uuid.uuid4())
        with open(CONFIG_FILE, "w") as f:
            json.dump({"user_id": user_id}, f)
        return user_id

    def capture(self, event_name, properties=None):
        """Send a telemetry event.

        Args:
            event_name: Name of the event to record.
            properties: Optional extra event properties. The fixed defaults
                (version, language, pid) are always applied on top.
        """
        # Bug fix: previously ``properties.update(...)`` raised
        # AttributeError whenever ``capture`` was called without properties
        # (the default None has no ``.update``). Copying also avoids
        # mutating the caller's dict.
        properties = dict(properties) if properties else {}
        properties.update(
            {
                "version": embedchain.__version__,
                "language": "python",
                "pid": os.getpid(),
            }
        )
        try:
            self.posthog.capture(self.user_id, event_name, properties)
        except Exception:
            logging.exception(f"Failed to send telemetry {event_name=}")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/telemetry/__init__.py | embedchain/embedchain/telemetry/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/image.py | embedchain/embedchain/loaders/image.py | import base64
import hashlib
import os
from pathlib import Path
from openai import OpenAI
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
DESCRIBE_IMAGE_PROMPT = "Describe the image:"
@register_deserializable
class ImageLoader(BaseLoader):
    """Describe an image (local file or remote URL) with an OpenAI vision model."""

    def __init__(self, max_tokens: int = 500, api_key: str = None, prompt: str = None):
        super().__init__()
        self.custom_prompt = prompt or DESCRIBE_IMAGE_PROMPT
        self.max_tokens = max_tokens
        # Falls back to the OPENAI_API_KEY env var; raises KeyError when unset.
        self.api_key = api_key or os.environ["OPENAI_API_KEY"]
        self.client = OpenAI(api_key=self.api_key)

    @staticmethod
    def _encode_image(image_path: str):
        """Return the file at ``image_path`` as a base64-encoded UTF-8 string."""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")

    def _create_completion_request(self, content: str):
        """Submit the multimodal prompt to the chat completions endpoint."""
        return self.client.chat.completions.create(
            model="gpt-4o", messages=[{"role": "user", "content": content}], max_tokens=self.max_tokens
        )

    def _process_url(self, url: str):
        """Build the message content parts for a remote URL or a local file.

        Raises:
            ValueError: if ``url`` is neither an http(s) URL nor an existing file.
        """
        if url.startswith("http"):
            return [{"type": "text", "text": self.custom_prompt}, {"type": "image_url", "image_url": {"url": url}}]
        elif Path(url).is_file():
            extension = Path(url).suffix.lstrip(".")
            encoded_image = self._encode_image(url)
            image_data = f"data:image/{extension};base64,{encoded_image}"
            # Bug fix: the part type must be "image_url" (as in the URL
            # branch above); "image" is not a valid content-part type for
            # the chat completions vision API, so local files always failed.
            return [
                {"type": "text", "text": self.custom_prompt},
                {"type": "image_url", "image_url": {"url": image_data}},
            ]
        else:
            raise ValueError(f"Invalid URL or file path: {url}")

    def load_data(self, url: str):
        """Load an image, describe it with the model, and return doc data."""
        content = self._process_url(url)
        response = self._create_completion_request(content)
        content = response.choices[0].message.content
        doc_id = hashlib.sha256((content + url).encode()).hexdigest()
        return {"doc_id": doc_id, "data": [{"content": content, "meta_data": {"url": url, "type": "image"}}]}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/web_page.py | embedchain/embedchain/loaders/web_page.py | import hashlib
import logging
from typing import Any, Optional
import requests
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Webpage requires extra dependencies. Install with `pip install beautifulsoup4==4.12.3`"
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
@register_deserializable
class WebPageLoader(BaseLoader):
    """Load and clean the textual content of a web page."""

    # Shared session for all instances (connection pooling across loads)
    _session = requests.Session()

    def load_data(self, url, **kwargs: Optional[dict[str, Any]]):
        """Load data from a web page using a shared requests' session.

        Args:
            url: Page URL to fetch.
            **kwargs: ``all_references`` (bool) — when truthy, the raw HTML
                of every absolute link found on the page is fetched and
                appended before cleaning.

        Returns:
            dict with ``doc_id`` (sha256 of cleaned content + url) and ``data``.
        """
        # Idiom fix: read the flag directly instead of scanning kwargs.
        all_references = kwargs.get("all_references", False)

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",  # noqa:E501
        }
        response = self._session.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        data = response.content
        reference_links = self.fetch_reference_links(response)

        if all_references:
            for link in reference_links:
                try:
                    response = self._session.get(link, headers=headers, timeout=30)
                    response.raise_for_status()
                    data += response.content
                except Exception as e:
                    # Bug fix: report the link that actually failed (the old
                    # message always printed the parent ``url``) and use the
                    # module logger instead of the root logger.
                    logger.error(f"Failed to add URL {link}: {e}")
                    continue

        content = self._get_clean_content(data, url)
        metadata = {"url": url}
        doc_id = hashlib.sha256((content + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [
                {
                    "content": content,
                    "meta_data": metadata,
                }
            ],
        }

    @staticmethod
    def _get_clean_content(html, url) -> str:
        """Strip boilerplate tags/ids/classes from ``html`` and return plain text."""
        soup = BeautifulSoup(html, "html.parser")
        original_size = len(str(soup.get_text()))

        # Structural elements that never carry article content.
        tags_to_exclude = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(tags_to_exclude):
            tag.decompose()

        # Common sidebar/menu element ids seen in the wild.
        ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
        for id_ in ids_to_exclude:
            tags = soup.find_all(id=id_)
            for tag in tags:
                tag.decompose()

        # Common theme-specific wrapper classes (e.g. Elementor/WordPress).
        classes_to_exclude = [
            "elementor-location-header",
            "navbar-header",
            "nav",
            "header-sidebar-wrapper",
            "blog-sidebar-wrapper",
            "related-posts",
        ]
        for class_name in classes_to_exclude:
            tags = soup.find_all(class_=class_name)
            for tag in tags:
                tag.decompose()

        content = soup.get_text()
        content = clean_string(content)

        cleaned_size = len(content)
        if original_size != 0:
            logger.info(
                f"[{url}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)"  # noqa:E501
            )
        return content

    @classmethod
    def close_session(cls):
        """Close the shared HTTP session (e.g. at application shutdown)."""
        cls._session.close()

    def fetch_reference_links(self, response):
        """Return all absolute (http...) hrefs found in ``response``'s HTML."""
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, "html.parser")
            a_tags = soup.find_all("a", href=True)
            reference_links = [a["href"] for a in a_tags if a["href"].startswith("http")]
            return reference_links
        else:
            # Consistency fix: use the module logger instead of print().
            logger.error(f"Failed to retrieve the page. Status code: {response.status_code}")
            return []
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/discord.py | embedchain/embedchain/loaders/discord.py | import hashlib
import logging
import os
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
logger = logging.getLogger(__name__)
@register_deserializable
class DiscordLoader(BaseLoader):
    """
    Load data from a Discord Channel ID.
    """

    def __init__(self):
        # Requires a bot token in the environment; fail fast when missing.
        if not os.environ.get("DISCORD_TOKEN"):
            raise ValueError("DISCORD_TOKEN is not set")
        self.token = os.environ.get("DISCORD_TOKEN")

    @staticmethod
    def _format_message(message):
        # Flatten a discord message into a JSON-friendly dict: id/content,
        # author, attachments, and fully-expanded embeds.
        # NOTE(review): this assumes every embed has footer/image/thumbnail/
        # video/provider/author sub-objects and a non-None timestamp —
        # confirm against the discord.py Embed API for embeds missing parts.
        return {
            "message_id": message.id,
            "content": message.content,
            "author": {
                "id": message.author.id,
                "name": message.author.name,
                "discriminator": message.author.discriminator,
            },
            "created_at": message.created_at.isoformat(),
            "attachments": [
                {
                    "id": attachment.id,
                    "filename": attachment.filename,
                    "size": attachment.size,
                    "url": attachment.url,
                    "proxy_url": attachment.proxy_url,
                    "height": attachment.height,
                    "width": attachment.width,
                }
                for attachment in message.attachments
            ],
            "embeds": [
                {
                    "title": embed.title,
                    "type": embed.type,
                    "description": embed.description,
                    "url": embed.url,
                    "timestamp": embed.timestamp.isoformat(),
                    "color": embed.color,
                    "footer": {
                        "text": embed.footer.text,
                        "icon_url": embed.footer.icon_url,
                        "proxy_icon_url": embed.footer.proxy_icon_url,
                    },
                    "image": {
                        "url": embed.image.url,
                        "proxy_url": embed.image.proxy_url,
                        "height": embed.image.height,
                        "width": embed.image.width,
                    },
                    "thumbnail": {
                        "url": embed.thumbnail.url,
                        "proxy_url": embed.thumbnail.proxy_url,
                        "height": embed.thumbnail.height,
                        "width": embed.thumbnail.width,
                    },
                    "video": {
                        "url": embed.video.url,
                        "height": embed.video.height,
                        "width": embed.video.width,
                    },
                    "provider": {
                        "name": embed.provider.name,
                        "url": embed.provider.url,
                    },
                    "author": {
                        "name": embed.author.name,
                        "url": embed.author.url,
                        "icon_url": embed.author.icon_url,
                        "proxy_icon_url": embed.author.proxy_icon_url,
                    },
                    "fields": [
                        {
                            "name": field.name,
                            "value": field.value,
                            "inline": field.inline,
                        }
                        for field in embed.fields
                    ],
                }
                for embed in message.embeds
            ],
        }

    def load_data(self, channel_id: str):
        """Load data from a Discord Channel ID."""
        # Imported lazily so the package works without discord installed.
        import discord

        # Accumulated by the client below; read after client.run() returns.
        messages = []

        class DiscordClient(discord.Client):
            # One-shot client: fetches the channel history when ready, then
            # closes itself (both on success and on error).
            async def on_ready(self) -> None:
                logger.info("Logged on as {0}!".format(self.user))
                try:
                    channel = self.get_channel(int(channel_id))
                    if not isinstance(channel, discord.TextChannel):
                        raise ValueError(
                            f"Channel {channel_id} is not a text channel. " "Only text channels are supported for now."
                        )
                    # Map thread ids so thread histories can be expanded
                    # when their root message is encountered.
                    threads = {}
                    for thread in channel.threads:
                        threads[thread.id] = thread
                    # limit=None walks the full channel history.
                    async for message in channel.history(limit=None):
                        messages.append(DiscordLoader._format_message(message))
                        if message.id in threads:
                            async for thread_message in threads[message.id].history(limit=None):
                                messages.append(DiscordLoader._format_message(thread_message))
                except Exception as e:
                    logger.error(e)
                    await self.close()
                finally:
                    await self.close()

        intents = discord.Intents.default()
        # Needed to read message text (privileged intent).
        intents.message_content = True
        client = DiscordClient(intents=intents)
        # Blocks until the client closes itself in on_ready.
        client.run(self.token)

        metadata = {
            "url": channel_id,
        }
        # The whole message list is stringified into a single document.
        messages = str(messages)
        doc_id = hashlib.sha256((messages + channel_id).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [
                {
                    "content": messages,
                    "meta_data": metadata,
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/sitemap.py | embedchain/embedchain/loaders/sitemap.py | import concurrent.futures
import hashlib
import logging
import os
from urllib.parse import urlparse
import requests
from tqdm import tqdm
try:
from bs4 import BeautifulSoup
from bs4.builder import ParserRejectedMarkup
except ImportError:
raise ImportError(
"Sitemap requires extra dependencies. Install with `pip install beautifulsoup4==4.12.3`"
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.web_page import WebPageLoader
logger = logging.getLogger(__name__)
@register_deserializable
class SitemapLoader(BaseLoader):
    """
    This method takes a sitemap URL or local file path as input and retrieves
    all the URLs to use the WebPageLoader to load content
    of each page.
    """

    def load_data(self, sitemap_source):
        """Load every page listed in a sitemap (remote URL or local XML file).

        Returns:
            dict with ``doc_id`` (sha256 of all links + source) and ``data``,
            or None when fetching a remote sitemap fails.
        """
        output = []
        web_page_loader = WebPageLoader()
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",  # noqa:E501
        }

        if urlparse(sitemap_source).scheme in ("http", "https"):
            try:
                # Robustness fix: bound the request so an unresponsive host
                # cannot hang the loader forever (matches WebPageLoader's 30s).
                response = requests.get(sitemap_source, headers=headers, timeout=30)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "xml")
            except requests.RequestException as e:
                logger.error(f"Error fetching sitemap from URL: {e}")
                # NOTE(review): returns None here while the success path
                # returns a dict — kept for backward compatibility.
                return
        elif os.path.isfile(sitemap_source):
            with open(sitemap_source, "r") as file:
                soup = BeautifulSoup(file, "xml")
        else:
            raise ValueError("Invalid sitemap source. Please provide a valid URL or local file path.")

        # Prefer <loc> entries that are direct children of <url>; fall back
        # to all <loc> entries (e.g. sitemap index files).
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url"]
        if len(links) == 0:
            links = [link.text for link in soup.find_all("loc")]

        doc_id = hashlib.sha256((" ".join(links) + sitemap_source).encode()).hexdigest()

        def load_web_page(link):
            # Worker: load one page; swallow parser rejections so a single
            # bad page does not abort the whole sitemap.
            try:
                loader_data = web_page_loader.load_data(link)
                return loader_data.get("data")
            except ParserRejectedMarkup as e:
                logger.error(f"Failed to parse {link}: {e}")
                return None

        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_link = {executor.submit(load_web_page, link): link for link in links}
            for future in tqdm(concurrent.futures.as_completed(future_to_link), total=len(links), desc="Loading pages"):
                link = future_to_link[future]
                try:
                    data = future.result()
                    if data:
                        output.extend(data)
                except Exception as e:
                    logger.error(f"Error loading page {link}: {e}")

        return {"doc_id": doc_id, "data": output}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/docs_site_loader.py | embedchain/embedchain/loaders/docs_site_loader.py | import hashlib
import logging
from urllib.parse import urljoin, urlparse
import requests
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"DocsSite requires extra dependencies. Install with `pip install beautifulsoup4==4.12.3`"
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
logger = logging.getLogger(__name__)
@register_deserializable
class DocsSiteLoader(BaseLoader):
    """Crawl a documentation site and load the main content of every page."""

    def __init__(self):
        # Absolute URLs discovered during the current crawl.
        self.visited_links = set()

    def _get_child_links_recursive(self, url):
        """Recursively collect links under ``url``'s path into ``visited_links``."""
        if url in self.visited_links:
            return
        parsed_url = urlparse(url)
        base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
        current_path = parsed_url.path

        # Robustness fix: bound the request so a dead host cannot hang the crawl.
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            logger.info(f"Failed to fetch the website: {response.status_code}")
            return

        soup = BeautifulSoup(response.text, "html.parser")
        all_links = (link.get("href") for link in soup.find_all("a", href=True))
        # Only descend into links that extend the current path.
        child_links = (link for link in all_links if link.startswith(current_path) and link != current_path)
        absolute_paths = set(urljoin(base_url, link) for link in child_links)

        # Bug fix: the old code added ``absolute_paths`` to ``visited_links``
        # *before* filtering on membership, so the "recursive" call was never
        # taken and only one level was ever crawled. Filter first, then mark
        # visited, then recurse into the genuinely new links.
        new_links = absolute_paths - self.visited_links
        self.visited_links.update(new_links)
        for link in new_links:
            self._get_child_links_recursive(link)

    def _get_all_urls(self, url):
        """Return every discovered link that lives on the same host as ``url``."""
        self.visited_links = set()
        self._get_child_links_recursive(url)
        urls = [link for link in self.visited_links if urlparse(link).netloc == urlparse(url).netloc]
        return urls

    @staticmethod
    def _load_data_from_url(url: str) -> list:
        """Fetch one page and extract its main content as plain text.

        Returns:
            A list with one ``{"content", "meta_data"}`` dict, or an empty
            list when the page cannot be fetched.
        """
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            logger.info(f"Failed to fetch the website: {response.status_code}")
            return []

        soup = BeautifulSoup(response.content, "html.parser")
        # Common "main content" containers, most specific first.
        selectors = [
            "article.bd-article",
            'article[role="main"]',
            "div.md-content",
            'div[role="main"]',
            "div.container",
            "div.section",
            "article",
            "main",
        ]
        output = []
        for selector in selectors:
            element = soup.select_one(selector)
            if element:
                content = element.prettify()
                break
        else:
            # No known container matched: fall back to the whole page text.
            content = soup.get_text()

        soup = BeautifulSoup(content, "html.parser")
        ignored_tags = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(ignored_tags):
            tag.decompose()

        content = " ".join(soup.stripped_strings)
        output.append(
            {
                "content": content,
                "meta_data": {"url": url},
            }
        )
        return output

    def load_data(self, url):
        """Crawl ``url`` and return the content of every same-host page."""
        all_urls = self._get_all_urls(url)
        output = []
        for u in all_urls:
            output.extend(self._load_data_from_url(u))
        doc_id = hashlib.sha256((" ".join(all_urls) + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": output,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/pdf_file.py | embedchain/embedchain/loaders/pdf_file.py | import hashlib
from langchain_community.document_loaders import PyPDFLoader
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class PdfFileLoader(BaseLoader):
    def load_data(self, url):
        """Load data from a PDF file."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",  # noqa:E501
        }
        pdf_loader = PyPDFLoader(url, headers=headers)
        pages = pdf_loader.load_and_split()
        if not len(pages):
            raise ValueError("No data found")

        documents = []
        page_texts = []
        for page in pages:
            # Normalize the extracted text and tag each page with its source.
            text = clean_string(page.page_content)
            meta = page.metadata
            meta["url"] = url
            documents.append(
                {
                    "content": text,
                    "meta_data": meta,
                }
            )
            page_texts.append(text)

        # Document id is a digest of the full cleaned text plus the URL.
        doc_id = hashlib.sha256((" ".join(page_texts) + url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": documents,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/substack.py | embedchain/embedchain/loaders/substack.py | import hashlib
import logging
import time
from xml.etree import ElementTree
import requests
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import is_readable
logger = logging.getLogger(__name__)
@register_deserializable
class SubstackLoader(BaseLoader):
    """
    This loader is used to load data from Substack URLs.
    """

    def load_data(self, url: str):
        """Load every post of a Substack publication via its sitemap.

        Args:
            url: Root substack URL, e.g. https://example.substack.com

        Raises:
            ValueError: when the sitemap cannot be fetched or parsed.
        """
        try:
            from bs4 import BeautifulSoup
            from bs4.builder import ParserRejectedMarkup
        except ImportError:
            raise ImportError(
                "Substack requires extra dependencies. Install with `pip install beautifulsoup4==4.12.3`"
            ) from None

        if not url.endswith("sitemap.xml"):
            url = url + "/sitemap.xml"
        output = []
        # Robustness fix: bound the request so a dead host cannot hang forever.
        response = requests.get(url, timeout=30)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise ValueError(
                f"""
                Failed to load {url}: {e}. Please use the root substack URL. For example, https://example.substack.com
            """
            )
        try:
            # Validate that the payload really is XML before scraping it.
            ElementTree.fromstring(response.content)
        except ElementTree.ParseError:
            raise ValueError(
                f"""
                Failed to parse {url}. Please use the root substack URL. For example, https://example.substack.com
            """
            )

        soup = BeautifulSoup(response.text, "xml")
        # Posts live under /p/; prefer <loc> entries nested in <url>.
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url" and "/p/" in link.text]
        if len(links) == 0:
            links = [link.text for link in soup.find_all("loc") if "/p/" in link.text]

        doc_id = hashlib.sha256((" ".join(links) + url).encode()).hexdigest()

        def serialize_response(soup: BeautifulSoup):
            # Extract title/description/content/likes from one post page.
            data = {}

            h1_els = soup.find_all("h1")
            # Bug fix: the old guard checked ``len > 0`` but indexed [1],
            # raising IndexError on pages with a single <h1>. Prefer the
            # second <h1> (the first is typically the site name) and fall
            # back to the first.
            if h1_els is not None and len(h1_els) > 1:
                data["title"] = h1_els[1].text
            elif h1_els:
                data["title"] = h1_els[0].text

            description_el = soup.find("meta", {"name": "description"})
            if description_el is not None:
                data["description"] = description_el["content"]

            content_el = soup.find("div", {"class": "available-content"})
            if content_el is not None:
                data["content"] = content_el.text

            like_btn = soup.find("div", {"class": "like-button-container"})
            if like_btn is not None:
                no_of_likes_div = like_btn.find("div", {"class": "label"})
                if no_of_likes_div is not None:
                    data["no_of_likes"] = no_of_likes_div.text

            return data

        def load_link(link: str):
            # Fetch one post; return its serialized text or None on failure.
            try:
                substack_data = requests.get(link, timeout=30)
                substack_data.raise_for_status()
                soup = BeautifulSoup(substack_data.text, "html.parser")
                data = serialize_response(soup)
                data = str(data)
                if is_readable(data):
                    return data
                else:
                    logger.warning(f"Page is not readable (too many invalid characters): {link}")
            except ParserRejectedMarkup as e:
                logger.error(f"Failed to parse {link}: {e}")
            return None

        for link in links:
            data = load_link(link)
            if data:
                output.append({"content": data, "meta_data": {"url": link}})
            # TODO: allow users to configure this
            time.sleep(1.0)  # added to avoid rate limiting

        return {"doc_id": doc_id, "data": output}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/audio.py | embedchain/embedchain/loaders/audio.py | import hashlib
import os
import validators
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
try:
from deepgram import DeepgramClient, PrerecordedOptions
except ImportError:
raise ImportError(
"Audio file requires extra dependencies. Install with `pip install deepgram-sdk==3.2.7`"
) from None
@register_deserializable
class AudioLoader(BaseLoader):
    def __init__(self):
        """Create a Deepgram client from the DEEPGRAM_API_KEY env var."""
        api_key = os.environ.get("DEEPGRAM_API_KEY")
        if not api_key:
            raise ValueError("DEEPGRAM_API_KEY is not set")
        self.client = DeepgramClient(api_key)

    def load_data(self, url: str):
        """Load data from a audio file or URL."""
        options = PrerecordedOptions(model="nova-2", smart_format=True)

        # Remote audio goes through transcribe_url; local files are streamed
        # to transcribe_file.
        if validators.url(url):
            response = self.client.listen.prerecorded.v("1").transcribe_url({"url": url}, options)
        else:
            with open(url, "rb") as audio:
                response = self.client.listen.prerecorded.v("1").transcribe_file({"buffer": audio}, options)

        content = response["results"]["channels"][0]["alternatives"][0]["transcript"]
        return {
            "doc_id": hashlib.sha256((content + url).encode()).hexdigest(),
            "data": [
                {
                    "content": content,
                    "meta_data": {"url": url},
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/excel_file.py | embedchain/embedchain/loaders/excel_file.py | import hashlib
import importlib.util
try:
import unstructured # noqa: F401
from langchain_community.document_loaders import UnstructuredExcelLoader
except ImportError:
raise ImportError(
'Excel file requires extra dependencies. Install with `pip install "unstructured[local-inference, all-docs]"`'
) from None
if importlib.util.find_spec("openpyxl") is None and importlib.util.find_spec("xlrd") is None:
raise ImportError("Excel file requires extra dependencies. Install with `pip install openpyxl xlrd`") from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class ExcelFileLoader(BaseLoader):
    def load_data(self, excel_url):
        """Load data from a Excel file.

        Returns:
            dict with ``doc_id`` (sha256 of all content + url) and ``data``.
        """
        loader = UnstructuredExcelLoader(excel_url)
        pages = loader.load_and_split()
        # Robustness fix: an empty workbook previously crashed with
        # NameError (``content`` undefined at the doc_id line). Fail
        # explicitly, matching PdfFileLoader/UnstructuredLoader.
        if not pages:
            raise ValueError("No data found")

        data = []
        all_content = []
        for page in pages:
            content = clean_string(page.page_content)
            metadata = page.metadata
            metadata["url"] = excel_url
            data.append({"content": content, "meta_data": metadata})
            all_content.append(content)

        # Bug fix: doc_id previously hashed only the *last* chunk's content;
        # hash the whole document like the sibling loaders do.
        doc_id = hashlib.sha256((" ".join(all_content) + excel_url).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": data,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/base_loader.py | embedchain/embedchain/loaders/base_loader.py | from typing import Any, Optional
from embedchain.helpers.json_serializable import JSONSerializable
class BaseLoader(JSONSerializable):
    """Common interface shared by every data loader."""

    def __init__(self):
        # Nothing to initialize at the base level.
        pass

    def load_data(self, url, **kwargs: Optional[dict[str, Any]]):
        """
        Implemented by child classes
        """
        pass
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/xml.py | embedchain/embedchain/loaders/xml.py | import hashlib
try:
import unstructured # noqa: F401
from langchain_community.document_loaders import UnstructuredXMLLoader
except ImportError:
raise ImportError(
'XML file requires extra dependencies. Install with `pip install "unstructured[local-inference, all-docs]"`'
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class XmlLoader(BaseLoader):
    def load_data(self, xml_url):
        """Load data from a XML file."""
        documents = UnstructuredXMLLoader(xml_url).load()
        first = documents[0]
        text = clean_string(first.page_content)
        metadata = first.metadata
        # Rename the loader's "source" key to "url" to match other loaders.
        metadata["url"] = metadata.pop("source")
        digest = hashlib.sha256((text + xml_url).encode()).hexdigest()
        return {
            "doc_id": digest,
            "data": [{"content": text, "meta_data": metadata}],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/youtube_video.py | embedchain/embedchain/loaders/youtube_video.py | import hashlib
import json
import logging
try:
from youtube_transcript_api import YouTubeTranscriptApi
except ImportError:
raise ImportError("YouTube video requires extra dependencies. Install with `pip install youtube-transcript-api`")
try:
from langchain_community.document_loaders import YoutubeLoader
from langchain_community.document_loaders.youtube import _parse_video_id
except ImportError:
raise ImportError("YouTube video requires extra dependencies. Install with `pip install pytube==15.0.0`") from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class YoutubeVideoLoader(BaseLoader):
    def load_data(self, url):
        """Load data from a Youtube video."""
        video_id = _parse_video_id(url)
        languages = ["en"]
        try:
            # Fetch the transcript in every language the video offers.
            available = YouTubeTranscriptApi.list_transcripts(video_id)
            languages = [entry.language_code for entry in available]
            raw_transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=languages)
            # Serialize to JSON so non-ASCII symbols are escaped.
            transcript = json.dumps(raw_transcript, ensure_ascii=True)
        except Exception:
            logging.exception(f"Failed to fetch transcript for video {url}")
            transcript = "Unavailable"

        doc = YoutubeLoader.from_youtube_url(url, add_video_info=True, language=languages).load()
        if not doc:
            raise ValueError(f"No data found for url: {url}")

        page = doc[0]
        text = clean_string(page.page_content)
        metadata = page.metadata
        metadata["url"] = url
        metadata["transcript"] = transcript

        return {
            "doc_id": hashlib.sha256((text + url).encode()).hexdigest(),
            "data": [
                {
                    "content": text,
                    "meta_data": metadata,
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/unstructured_file.py | embedchain/embedchain/loaders/unstructured_file.py | import hashlib
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
@register_deserializable
class UnstructuredLoader(BaseLoader):
    def load_data(self, url):
        """Load data from an Unstructured file."""
        try:
            import unstructured  # noqa: F401
            from langchain_community.document_loaders import UnstructuredFileLoader
        except ImportError:
            raise ImportError(
                'Unstructured file requires extra dependencies. Install with `pip install "unstructured[local-inference, all-docs]"`'  # noqa: E501
            ) from None

        chunks = UnstructuredFileLoader(url).load_and_split()
        if not len(chunks):
            raise ValueError("No data found")

        documents = []
        texts = []
        for chunk in chunks:
            # Normalize each chunk's text and tag it with its source URL.
            text = clean_string(chunk.page_content)
            meta = chunk.metadata
            meta["url"] = url
            documents.append(
                {
                    "content": text,
                    "meta_data": meta,
                }
            )
            texts.append(text)

        digest = hashlib.sha256((" ".join(texts) + url).encode()).hexdigest()
        return {
            "doc_id": digest,
            "data": documents,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/mysql.py | embedchain/embedchain/loaders/mysql.py | import hashlib
import logging
from typing import Any, Optional
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
class MySQLLoader(BaseLoader):
    """Load rows from a MySQL database with a SQL query."""

    def __init__(self, config: Optional[dict[str, Any]]):
        super().__init__()
        if not config:
            raise ValueError(
                f"Invalid sql config: {config}.",
                "Provide the correct config, refer `https://docs.embedchain.ai/data-sources/mysql`.",
            )
        self.config = config
        self.connection = None
        self.cursor = None
        self._setup_loader(config=config)

    def _setup_loader(self, config: dict[str, Any]):
        """Open the MySQL connection and cursor described by ``config``."""
        try:
            import mysql.connector as sqlconnector
        except ImportError as e:
            raise ImportError(
                "Unable to import required packages for MySQL loader. Run `pip install --upgrade 'embedchain[mysql]'`."  # noqa: E501
            ) from e

        try:
            self.connection = sqlconnector.connection.MySQLConnection(**config)
            self.cursor = self.connection.cursor()
        except (sqlconnector.Error, IOError) as err:
            # Consistency fix: a failed connection is an error, not info.
            logger.error(f"Connection failed: {err}")
            # Chain the original driver error for easier debugging.
            raise ValueError(
                f"Unable to connect with the given config: {config}.",
                "Please provide the correct configuration to load data from you MySQL DB. \
                Refer `https://docs.embedchain.ai/data-sources/mysql`.",
            ) from err

    @staticmethod
    def _check_query(query):
        """Raise ValueError unless ``query`` is a string."""
        if not isinstance(query, str):
            raise ValueError(
                f"Invalid mysql query: {query}",
                "Provide the valid query to add from mysql, \
                make sure you are following `https://docs.embedchain.ai/data-sources/mysql`",
            )

    def load_data(self, query):
        """Run ``query`` and return each row as one document."""
        self._check_query(query=query)
        data = []
        data_content = []
        self.cursor.execute(query)
        rows = self.cursor.fetchall()
        for row in rows:
            doc_content = clean_string(str(row))
            data.append({"content": doc_content, "meta_data": {"url": query}})
            data_content.append(doc_content)
        doc_id = hashlib.sha256((query + ", ".join(data_content)).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": data,
        }

    def close_connection(self):
        """Release the cursor and connection.

        Consistency fix: PostgresLoader exposes this; previously there was
        no way to close a MySQL connection once opened.
        """
        if self.cursor:
            self.cursor.close()
            self.cursor = None
        if self.connection:
            self.connection.close()
            self.connection = None
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/postgres.py | embedchain/embedchain/loaders/postgres.py | import hashlib
import logging
from typing import Any, Optional
from embedchain.loaders.base_loader import BaseLoader
logger = logging.getLogger(__name__)
class PostgresLoader(BaseLoader):
    """Load documents from a PostgreSQL database by running arbitrary SQL queries."""

    def __init__(self, config: Optional[dict[str, Any]] = None):
        """
        Args:
            config: either {"url": <libpq conninfo string>} or libpq keyword params.

        Raises:
            ValueError: if config is missing or empty.
        """
        super().__init__()
        if not config:
            raise ValueError(f"Must provide the valid config. Received: {config}")

        self.connection = None
        self.cursor = None
        self._setup_loader(config=config)

    def _setup_loader(self, config: dict[str, Any]):
        """Open the psycopg connection and cursor from the given config."""
        try:
            import psycopg
        except ImportError as e:
            raise ImportError(
                "Unable to import required packages. \
                Run `pip install --upgrade 'embedchain[postgres]'`"
            ) from e

        if "url" in config:
            config_info = config.get("url")
        else:
            # Build a libpq conninfo string ("key=value key=value ...") from kwargs.
            conn_params = []
            for key, value in config.items():
                conn_params.append(f"{key}={value}")
            config_info = " ".join(conn_params)

        # Fix: message previously said "postrgres".
        # NOTE(review): conninfo may contain a password; consider redacting before logging.
        logger.info(f"Connecting to postgres sql: {config_info}")
        self.connection = psycopg.connect(conninfo=config_info)
        self.cursor = self.connection.cursor()

    @staticmethod
    def _check_query(query):
        """Validate that `query` is a string; raise ValueError otherwise."""
        if not isinstance(query, str):
            raise ValueError(
                f"Invalid postgres query: {query}. Provide the valid source to add from postgres, make sure you are following `https://docs.embedchain.ai/data-sources/postgres`",  # noqa:E501
            )

    def load_data(self, query):
        """Run `query` and return {"doc_id": ..., "data": [...]}, one entry per row."""
        self._check_query(query)
        try:
            data = []
            data_content = []
            self.cursor.execute(query)
            results = self.cursor.fetchall()
            for result in results:
                doc_content = str(result)
                data.append({"content": doc_content, "meta_data": {"url": query}})
                data_content.append(doc_content)
            doc_id = hashlib.sha256((query + ", ".join(data_content)).encode()).hexdigest()
            return {
                "doc_id": doc_id,
                "data": data,
            }
        except Exception as e:
            # Chain the original exception so the underlying DB error stays visible.
            raise ValueError(f"Failed to load data using query={query} with: {e}") from e

    def close_connection(self):
        """Release the cursor and connection (safe to call more than once)."""
        if self.cursor:
            self.cursor.close()
            self.cursor = None

        if self.connection:
            self.connection.close()
            self.connection = None
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/docx_file.py | embedchain/embedchain/loaders/docx_file.py | import hashlib
try:
from langchain_community.document_loaders import Docx2txtLoader
except ImportError:
raise ImportError("Docx file requires extra dependencies. Install with `pip install docx2txt==0.8`") from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class DocxFileLoader(BaseLoader):
    def load_data(self, url):
        """Load the .docx file at `url` and return its text plus metadata."""
        document = Docx2txtLoader(url).load()[0]
        text = document.page_content
        meta = document.metadata
        # Local files carry no meaningful source URL.
        meta["url"] = "local"
        return {
            "doc_id": hashlib.sha256((text + url).encode()).hexdigest(),
            "data": [{"content": text, "meta_data": meta}],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/beehiiv.py | embedchain/embedchain/loaders/beehiiv.py | import hashlib
import logging
import time
from xml.etree import ElementTree
import requests
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import is_readable
logger = logging.getLogger(__name__)
@register_deserializable
class BeehiivLoader(BaseLoader):
    """
    This loader is used to load data from Beehiiv URLs.
    """

    def load_data(self, url: str):
        """Crawl the publication's sitemap and load every `/p/` post page.

        Args:
            url: Root Beehiiv publication URL (a trailing `sitemap.xml` is
                appended when missing).

        Returns:
            dict with a deterministic `doc_id` and one `data` entry per
            readable post.
        """
        try:
            from bs4 import BeautifulSoup
            from bs4.builder import ParserRejectedMarkup
        except ImportError:
            raise ImportError(
                "Beehiiv requires extra dependencies. Install with `pip install beautifulsoup4==4.12.3`"
            ) from None

        if not url.endswith("sitemap.xml"):
            url = url + "/sitemap.xml"

        output = []
        # we need to set this as a header to avoid 403
        headers = {
            "User-Agent": (
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) "
                "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 "
                "Safari/537.36"
            ),
        }
        response = requests.get(url, headers=headers)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # Fix: error messages previously referenced Substack (copy-paste from
            # the substack loader); this is the Beehiiv loader.
            raise ValueError(
                f"""
                Failed to load {url}: {e}. Please use the root beehiiv URL. For example, https://example.beehiiv.com
                """
            )

        try:
            ElementTree.fromstring(response.content)
        except ElementTree.ParseError:
            raise ValueError(
                f"""
                Failed to parse {url}. Please use the root beehiiv URL. For example, https://example.beehiiv.com
                """
            )

        soup = BeautifulSoup(response.text, "xml")
        links = [link.text for link in soup.find_all("loc") if link.parent.name == "url" and "/p/" in link.text]
        if len(links) == 0:
            # Some sitemaps don't nest <loc> under <url>; fall back to any /p/ link.
            links = [link.text for link in soup.find_all("loc") if "/p/" in link.text]

        doc_id = hashlib.sha256((" ".join(links) + url).encode()).hexdigest()

        def serialize_response(soup: BeautifulSoup):
            # Pull title / meta-description / body text out of a post page.
            data = {}

            h1_el = soup.find("h1")
            if h1_el is not None:
                data["title"] = h1_el.text

            description_el = soup.find("meta", {"name": "description"})
            if description_el is not None:
                data["description"] = description_el["content"]

            content_el = soup.find("div", {"id": "content-blocks"})
            if content_el is not None:
                data["content"] = content_el.text

            return data

        def load_link(link: str):
            # Best-effort per-post fetch; returns None on parse failure or
            # unreadable content so the crawl can continue.
            try:
                beehiiv_data = requests.get(link, headers=headers)
                beehiiv_data.raise_for_status()
                soup = BeautifulSoup(beehiiv_data.text, "html.parser")
                data = serialize_response(soup)
                data = str(data)
                if is_readable(data):
                    return data
                else:
                    logger.warning(f"Page is not readable (too many invalid characters): {link}")
            except ParserRejectedMarkup as e:
                logger.error(f"Failed to parse {link}: {e}")
            return None

        for link in links:
            data = load_link(link)
            if data:
                output.append({"content": data, "meta_data": {"url": link}})
            # TODO: allow users to configure this
            time.sleep(1.0)  # added to avoid rate limiting

        return {"doc_id": doc_id, "data": output}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/mdx.py | embedchain/embedchain/loaders/mdx.py | import hashlib
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class MdxLoader(BaseLoader):
    def load_data(self, url):
        """Read the .mdx file at `url` and return its raw text as one document."""
        with open(url, "r", encoding="utf-8") as handle:
            text = handle.read()
        return {
            "doc_id": hashlib.sha256((text + url).encode()).hexdigest(),
            "data": [
                {
                    "content": text,
                    "meta_data": {"url": url},
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/discourse.py | embedchain/embedchain/loaders/discourse.py | import hashlib
import logging
import time
from typing import Any, Optional
import requests
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
class DiscourseLoader(BaseLoader):
    """Load posts from a Discourse forum via its public search API."""

    def __init__(self, config: Optional[dict[str, Any]] = None):
        # `domain` is required. URLs are built as f"{domain}posts/..." with no
        # separator, so the configured domain is assumed to end with "/" —
        # TODO confirm against the docs/callers.
        super().__init__()
        if not config:
            raise ValueError(
                "DiscourseLoader requires a config. Check the documentation for the correct format - `https://docs.embedchain.ai/components/data-sources/discourse`"  # noqa: E501
            )
        self.domain = config.get("domain")
        if not self.domain:
            raise ValueError(
                "DiscourseLoader requires a domain. Check the documentation for the correct format - `https://docs.embedchain.ai/components/data-sources/discourse`"  # noqa: E501
            )

    def _check_query(self, query):
        # Reject empty or non-string queries before hitting the network.
        if not query or not isinstance(query, str):
            raise ValueError(
                "DiscourseLoader requires a query. Check the documentation for the correct format - `https://docs.embedchain.ai/components/data-sources/discourse`"  # noqa: E501
            )

    def _load_post(self, post_id):
        """Fetch one post's JSON; returns None (implicit) if the HTTP call fails."""
        post_url = f"{self.domain}posts/{post_id}.json"
        response = requests.get(post_url)
        try:
            response.raise_for_status()
        except Exception as e:
            # Best effort: log and skip posts that fail to load.
            logger.error(f"Failed to load post {post_id}: {e}")
            return
        response_data = response.json()
        # "raw" holds the post's markdown source.
        post_contents = clean_string(response_data.get("raw"))
        metadata = {
            "url": post_url,
            "created_at": response_data.get("created_at", ""),
            "username": response_data.get("username", ""),
            "topic_slug": response_data.get("topic_slug", ""),
            "score": response_data.get("score", ""),
        }
        data = {
            "content": post_contents,
            "meta_data": metadata,
        }
        return data

    def load_data(self, query):
        """Search the forum for `query` and load every matching post.

        Raises:
            ValueError: if the query is invalid or the search request fails.
        """
        self._check_query(query)
        data = []
        data_contents = []
        logger.info(f"Searching data on discourse url: {self.domain}, for query: {query}")
        search_url = f"{self.domain}search.json?q={query}"
        response = requests.get(search_url)
        try:
            response.raise_for_status()
        except Exception as e:
            raise ValueError(f"Failed to search query {query}: {e}")
        response_data = response.json()
        post_ids = response_data.get("grouped_search_result").get("post_ids")
        for id in post_ids:
            post_data = self._load_post(id)
            if post_data:
                data.append(post_data)
                data_contents.append(post_data.get("content"))
            # Sleep for 0.4 sec, to avoid rate limiting. Check `https://meta.discourse.org/t/api-rate-limits/208405/6`
            time.sleep(0.4)
        # Deterministic id over the query plus every loaded post's content.
        doc_id = hashlib.sha256((query + ", ".join(data_contents)).encode()).hexdigest()
        response_data = {"doc_id": doc_id, "data": data}
        return response_data
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/youtube_channel.py | embedchain/embedchain/loaders/youtube_channel.py | import concurrent.futures
import hashlib
import logging
from tqdm import tqdm
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.youtube_video import YoutubeVideoLoader
logger = logging.getLogger(__name__)
class YoutubeChannelLoader(BaseLoader):
    """Loader for youtube channel."""

    def load_data(self, channel_name):
        """Load every video of a channel via YoutubeVideoLoader, in parallel.

        Args:
            channel_name: Channel handle/path, appended to `https://www.youtube.com/`.

        Returns:
            dict with a `doc_id` derived from the channel URL plus the loaded
            video URLs, and `data` aggregated from each video's loader output.
        """
        try:
            import yt_dlp
        except ImportError as e:
            raise ValueError(
                "YoutubeChannelLoader requires extra dependencies. Install with `pip install yt_dlp==2023.11.14 youtube-transcript-api==0.6.1`"  # noqa: E501
            ) from e

        data = []
        data_urls = []
        youtube_url = f"https://www.youtube.com/{channel_name}/videos"
        youtube_video_loader = YoutubeVideoLoader()

        def _get_yt_video_links():
            # Flat-extract the channel page to list video URLs without downloading.
            try:
                ydl_opts = {
                    "quiet": True,
                    "extract_flat": True,
                }
                with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                    info_dict = ydl.extract_info(youtube_url, download=False)
                    if "entries" in info_dict:
                        videos = [entry["url"] for entry in info_dict["entries"]]
                        return videos
            except Exception:
                logger.error(f"Failed to fetch youtube videos for channel: {channel_name}")
            return []

        def _load_yt_video(video_link):
            # Best-effort per-video load; returns None on failure so the
            # channel load can continue with the remaining videos.
            try:
                each_load_data = youtube_video_loader.load_data(video_link)
                if each_load_data:
                    return each_load_data.get("data")
            except Exception as e:
                logger.error(f"Failed to load youtube video {video_link}: {e}")
            return None

        def _add_youtube_channel():
            video_links = _get_yt_video_links()
            logger.info("Loading videos from youtube channel...")
            with concurrent.futures.ThreadPoolExecutor() as executor:
                # Submitting all tasks and storing the future object with the video link
                future_to_video = {
                    executor.submit(_load_yt_video, video_link): video_link for video_link in video_links
                }
                for future in tqdm(
                    concurrent.futures.as_completed(future_to_video), total=len(video_links), desc="Processing videos"
                ):
                    video = future_to_video[future]
                    try:
                        results = future.result()
                        if results:
                            # Mutates the enclosing `data`/`data_urls` accumulators.
                            data.extend(results)
                            data_urls.extend([result.get("meta_data").get("url") for result in results])
                    except Exception as e:
                        logger.error(f"Failed to process youtube video {video}: {e}")

        _add_youtube_channel()
        doc_id = hashlib.sha256((youtube_url + ", ".join(data_urls)).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": data,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/__init__.py | embedchain/embedchain/loaders/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/rss_feed.py | embedchain/embedchain/loaders/rss_feed.py | import hashlib
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class RSSFeedLoader(BaseLoader):
    """Loader for RSS Feed."""

    def load_data(self, url):
        """Fetch every entry of the RSS feed at `url` and return it as documents."""
        entries = self.get_rss_content(url)
        return {
            "doc_id": hashlib.sha256((str(entries) + url).encode()).hexdigest(),
            "data": entries,
        }

    @staticmethod
    def serialize_metadata(metadata):
        """Coerce non-primitive metadata values to strings, in place."""
        for key in metadata:
            value = metadata[key]
            if not isinstance(value, (str, int, float, bool)):
                metadata[key] = str(value)
        return metadata

    @staticmethod
    def get_rss_content(url: str):
        """Load the feed via langchain's RSS loader and normalize each entry."""
        try:
            from langchain_community.document_loaders import (
                RSSFeedLoader as LangchainRSSFeedLoader,
            )
        except ImportError:
            raise ImportError(
                """RSSFeedLoader file requires extra dependencies.
                Install with `pip install feedparser==6.0.10 newspaper3k==0.2.8 listparser==0.19`"""
            ) from None

        documents = LangchainRSSFeedLoader(urls=[url]).load()
        results = []
        for document in documents:
            meta = RSSFeedLoader.serialize_metadata(document.metadata)
            meta.update({"url": url})
            results.append(
                {
                    "content": document.page_content,
                    "meta_data": meta,
                }
            )
        return results
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/slack.py | embedchain/embedchain/loaders/slack.py | import hashlib
import logging
import os
import ssl
from typing import Any, Optional
import certifi
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
SLACK_API_BASE_URL = "https://www.slack.com/api/"
logger = logging.getLogger(__name__)
class SlackLoader(BaseLoader):
    """Load Slack messages matching a search query via the Slack Web API."""

    def __init__(self, config: Optional[dict[str, Any]] = None):
        # config keys: base_url, headers, team_id (org-wide apps), count.
        super().__init__()
        self.config = config if config else {}
        if "base_url" not in self.config:
            self.config["base_url"] = SLACK_API_BASE_URL
        self.client = None
        self._setup_loader(self.config)

    def _setup_loader(self, config: dict[str, Any]):
        """Create the Slack WebClient from config plus the SLACK_USER_TOKEN env var."""
        try:
            from slack_sdk import WebClient
        except ImportError as e:
            raise ImportError(
                "Slack loader requires extra dependencies. \
                Install with `pip install --upgrade embedchain[slack]`"
            ) from e

        if os.getenv("SLACK_USER_TOKEN") is None:
            raise ValueError(
                "SLACK_USER_TOKEN environment variables not provided. Check `https://docs.embedchain.ai/data-sources/slack` to learn more."  # noqa:E501
            )

        logger.info(f"Creating Slack Loader with config: {config}")
        # get slack client config params
        slack_bot_token = os.getenv("SLACK_USER_TOKEN")
        ssl_cert = ssl.create_default_context(cafile=certifi.where())
        base_url = config.get("base_url", SLACK_API_BASE_URL)
        headers = config.get("headers")
        # for Org-Wide App
        team_id = config.get("team_id")

        self.client = WebClient(
            token=slack_bot_token,
            base_url=base_url,
            ssl=ssl_cert,
            headers=headers,
            team_id=team_id,
        )
        logger.info("Slack Loader setup successful!")

    @staticmethod
    def _check_query(query):
        """Validate that `query` is a string; raise ValueError otherwise."""
        if not isinstance(query, str):
            raise ValueError(
                f"Invalid query passed to Slack loader, found: {query}. Check `https://docs.embedchain.ai/data-sources/slack` to learn more."  # noqa:E501
            )

    def load_data(self, query):
        """Search messages matching `query` (newest first) and return them as documents."""
        self._check_query(query)
        try:
            data = []
            data_content = []

            logger.info(f"Searching slack conversations for query: {query}")
            results = self.client.search_messages(
                query=query,
                sort="timestamp",
                sort_dir="desc",
                count=self.config.get("count", 100),
            )
            messages = results.get("messages")
            matches = messages.get("matches", [])
            # Fix: previously logged len(messages), which counts the keys of the
            # response dict (total/matches/pagination/...), not matched messages.
            num_message = len(matches)
            logger.info(f"Found {num_message} messages for query: {query}")

            for message in matches:
                url = message.get("permalink")
                text = message.get("text")
                content = clean_string(text)

                # Keep only a whitelisted subset of message fields as metadata.
                message_meta_data_keys = ["iid", "team", "ts", "type", "user", "username"]
                metadata = {}
                for key in message.keys():
                    if key in message_meta_data_keys:
                        metadata[key] = message.get(key)
                metadata.update({"url": url})
                data.append(
                    {
                        "content": content,
                        "meta_data": metadata,
                    }
                )
                data_content.append(content)
            # NOTE(review): md5 is inconsistent with the sha256 used by the other
            # loaders; left unchanged so previously stored doc_ids stay valid.
            doc_id = hashlib.md5((query + ", ".join(data_content)).encode()).hexdigest()
            return {
                "doc_id": doc_id,
                "data": data,
            }
        except Exception as e:
            logger.warning(f"Error in loading slack data: {e}")
            raise ValueError(
                f"Error in loading slack data: {e}. Check `https://docs.embedchain.ai/data-sources/slack` to learn more."  # noqa:E501
            ) from e
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/text_file.py | embedchain/embedchain/loaders/text_file.py | import hashlib
import os
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class TextFileLoader(BaseLoader):
    def load_data(self, url: str):
        """Read the local text file at `url` and return its contents as one document."""
        if not os.path.exists(url):
            raise FileNotFoundError(f"The file at {url} does not exist.")

        with open(url, "r", encoding="utf-8") as handle:
            text = handle.read()

        meta = {
            "url": url,
            "file_size": os.path.getsize(url),
            "file_type": url.split(".")[-1],
        }
        return {
            "doc_id": hashlib.sha256((text + url).encode()).hexdigest(),
            "data": [{"content": text, "meta_data": meta}],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/json.py | embedchain/embedchain/loaders/json.py | import hashlib
import json
import os
import re
from typing import Union
import requests
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string, is_valid_json_string
class JSONReader:
    def __init__(self) -> None:
        """Initialize the JSONReader."""
        pass

    @staticmethod
    def load_data(json_data: Union[dict, str]) -> list[str]:
        """Flatten a JSON structure into a single newline-joined string of leaf lines.

        Args:
            json_data (Union[dict, str]): JSON text or an already-parsed structure.

        Returns:
            list[str]: A one-element list containing the "useful" (non-bracket) lines.
        """
        parsed = json.loads(json_data) if isinstance(json_data, str) else json_data
        # indent=0 puts every key/value on its own line; drop pure-bracket lines.
        dumped = json.dumps(parsed, indent=0)
        keep = [line for line in dumped.split("\n") if not re.match(r"^[{}\[\],]*$", line)]
        return ["\n".join(keep)]
# Matches http(s) URLs (hostname or dotted IPv4, optional port, non-empty path)
# ending in `.json`. Raw strings fix the invalid escape sequences (`\.`, `\d`,
# `\s`) the previous plain string triggered (SyntaxWarning on newer Pythons);
# the resulting pattern value is unchanged.
VALID_URL_PATTERN = (
    r"^https?://(?:www\.)?(?:\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[a-zA-Z0-9.-]+)(?::\d+)?/(?:[^/\s]+/)*[^/\s]+\.json$"
)
class JSONLoader(BaseLoader):
    """Load JSON from a local file, a remote `.json` URL, or a raw JSON string."""

    @staticmethod
    def _check_content(content):
        """Reject non-string input with a pointer to the expected formats."""
        if not isinstance(content, str):
            # Fix: message previously read "Invaid" and suggested `json.dump`
            # (which writes to a file) instead of `json.dumps`.
            raise ValueError(
                "Invalid content input. \
                If you want to upload (list, dict, etc.), do \
                `json.dumps(data, indent=0)` and add the stringified JSON. \
                Check - `https://docs.embedchain.ai/data-sources/json`"
            )

    @staticmethod
    def load_data(content):
        """Load a json file. Each data point is a key value pair."""
        JSONLoader._check_content(content)
        loader = JSONReader()

        data = []
        data_content = []

        content_url_str = content

        if os.path.isfile(content):
            with open(content, "r", encoding="utf-8") as json_file:
                json_data = json.load(json_file)
        elif re.match(VALID_URL_PATTERN, content):
            response = requests.get(content)
            if response.status_code == 200:
                json_data = response.json()
            else:
                raise ValueError(
                    f"Loading data from the given url: {content} failed. \
                    Make sure the url is working."
                )
        elif is_valid_json_string(content):
            json_data = content
            # Raw JSON strings have no stable source URL; hash the content instead.
            content_url_str = hashlib.sha256((content).encode("utf-8")).hexdigest()
        else:
            raise ValueError(f"Invalid content to load json data from: {content}")

        docs = loader.load_data(json_data)
        for doc in docs:
            text = doc if isinstance(doc, str) else doc["text"]
            doc_content = clean_string(text)
            data.append({"content": doc_content, "meta_data": {"url": content_url_str}})
            data_content.append(doc_content)

        doc_id = hashlib.sha256((content_url_str + ", ".join(data_content)).encode()).hexdigest()
        return {"doc_id": doc_id, "data": data}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/local_qna_pair.py | embedchain/embedchain/loaders/local_qna_pair.py | import hashlib
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class LocalQnaPairLoader(BaseLoader):
    def load_data(self, content):
        """Turn a (question, answer) pair into a single Q/A document."""
        question, answer = content
        text = f"Q: {question}\nA: {answer}"
        source = "local"
        return {
            "doc_id": hashlib.sha256((text + source).encode()).hexdigest(),
            "data": [
                {
                    "content": text,
                    "meta_data": {"url": source, "question": question},
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/directory_loader.py | embedchain/embedchain/loaders/directory_loader.py | import hashlib
import logging
from pathlib import Path
from typing import Any, Optional
from embedchain.config import AddConfig
from embedchain.data_formatter.data_formatter import DataFormatter
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.text_file import TextFileLoader
from embedchain.utils.misc import detect_datatype
logger = logging.getLogger(__name__)
@register_deserializable
class DirectoryLoader(BaseLoader):
    """Load data from a directory."""

    def __init__(self, config: Optional[dict[str, Any]] = None):
        # config keys: `recursive` (default True) and `extensions`
        # (optional list of suffixes like [".md", ".txt"] to include).
        super().__init__()
        config = config or {}
        self.recursive = config.get("recursive", True)
        self.extensions = config.get("extensions", None)
        # Per-file loader failures are collected here and logged after the walk.
        self.errors = []

    def load_data(self, path: str):
        """Walk `path` and load every matching file with a type-detected loader.

        Raises:
            ValueError: if `path` is not a directory.
        """
        directory_path = Path(path)
        if not directory_path.is_dir():
            raise ValueError(f"Invalid path: {path}")

        logger.info(f"Loading data from directory: {path}")
        data_list = self._process_directory(directory_path)
        doc_id = hashlib.sha256((str(data_list) + str(directory_path)).encode()).hexdigest()

        # Surface (but do not fail on) any per-file errors collected during the walk.
        for error in self.errors:
            logger.warning(error)

        return {"doc_id": doc_id, "data": data_list}

    def _process_directory(self, directory_path: Path):
        """Collect loader output for every eligible file under `directory_path`."""
        data_list = []
        # rglob recurses into subdirectories; glob stays at the top level.
        for file_path in directory_path.rglob("*") if self.recursive else directory_path.glob("*"):
            # don't include dotfiles
            if file_path.name.startswith("."):
                continue
            if file_path.is_file() and (not self.extensions or any(file_path.suffix == ext for ext in self.extensions)):
                loader = self._predict_loader(file_path)
                data_list.extend(loader.load_data(str(file_path))["data"])
            elif file_path.is_dir():
                logger.info(f"Loading data from directory: {file_path}")
        return data_list

    def _predict_loader(self, file_path: Path) -> BaseLoader:
        """Pick a loader from the detected data type; fall back to plain text on error."""
        try:
            data_type = detect_datatype(str(file_path))
            config = AddConfig()
            return DataFormatter(data_type=data_type, config=config)._get_loader(
                data_type=data_type, config=config.loader, loader=None
            )
        except Exception as e:
            self.errors.append(f"Error processing {file_path}: {e}")
            return TextFileLoader()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/dropbox.py | embedchain/embedchain/loaders/dropbox.py | import hashlib
import os
from dropbox.files import FileMetadata
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.loaders.directory_loader import DirectoryLoader
@register_deserializable
class DropboxLoader(BaseLoader):
    """Download a Dropbox folder locally and load it via DirectoryLoader."""

    def __init__(self):
        """Authenticate against Dropbox using the DROPBOX_ACCESS_TOKEN env var.

        Raises:
            ValueError: if the token is missing or invalid.
        """
        access_token = os.environ.get("DROPBOX_ACCESS_TOKEN")
        if not access_token:
            raise ValueError("Please set the `DROPBOX_ACCESS_TOKEN` environment variable.")

        try:
            from dropbox import Dropbox, exceptions
        except ImportError:
            raise ImportError("Dropbox requires extra dependencies. Install with `pip install dropbox==11.36.2`")

        try:
            dbx = Dropbox(access_token)
            # Cheap call that validates the token before any real work.
            dbx.users_get_current_account()
            self.dbx = dbx
        except exceptions.AuthError as ex:
            raise ValueError("Invalid Dropbox access token. Please verify your token and try again.") from ex

    def _download_folder(self, path: str, local_root: str) -> list[FileMetadata]:
        """Download a folder from Dropbox and save it preserving the directory structure."""
        entries = self.dbx.files_list_folder(path).entries
        for entry in entries:
            local_path = os.path.join(local_root, entry.name)
            if isinstance(entry, FileMetadata):
                self.dbx.files_download_to_file(local_path, f"{path}/{entry.name}")
            else:
                # Folder entry: recurse.
                os.makedirs(local_path, exist_ok=True)
                self._download_folder(f"{path}/{entry.name}", local_path)
        return entries

    def _generate_dir_id_from_all_paths(self, path: str) -> str:
        """Generate a unique ID for a directory based on all of its paths."""
        entries = self.dbx.files_list_folder(path).entries
        paths = [f"{path}/{entry.name}" for entry in entries]
        return hashlib.sha256("".join(paths).encode()).hexdigest()

    def load_data(self, path: str):
        """Load data from a Dropbox folder path, preserving the folder structure."""
        root_dir = f"dropbox_{self._generate_dir_id_from_all_paths(path)}"
        os.makedirs(root_dir, exist_ok=True)
        # Refactor: previously this inlined a verbatim copy of
        # `_download_folder`'s loop; reuse the helper instead.
        self._download_folder(path, root_dir)

        dir_loader = DirectoryLoader()
        data = dir_loader.load_data(root_dir)["data"]

        # Clean up
        self._clean_directory(root_dir)

        return {
            "doc_id": hashlib.sha256(path.encode()).hexdigest(),
            "data": data,
        }

    def _clean_directory(self, dir_path):
        """Recursively delete a directory and its contents."""
        for item in os.listdir(dir_path):
            item_path = os.path.join(dir_path, item)
            if os.path.isdir(item_path):
                self._clean_directory(item_path)
            else:
                os.remove(item_path)
        os.rmdir(dir_path)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/google_drive.py | embedchain/embedchain/loaders/google_drive.py | import hashlib
import re
try:
from googleapiclient.errors import HttpError
except ImportError:
raise ImportError(
"Google Drive requires extra dependencies. Install with `pip install embedchain[googledrive]`"
) from None
from langchain_community.document_loaders import GoogleDriveLoader as Loader
try:
import unstructured # noqa: F401
from langchain_community.document_loaders import UnstructuredFileIOLoader
except ImportError:
raise ImportError(
'Unstructured file requires extra dependencies. Install with `pip install "unstructured[local-inference, all-docs]"`' # noqa: E501
) from None
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class GoogleDriveLoader(BaseLoader):
    """Recursively load every file inside a Google Drive folder."""

    @staticmethod
    def _get_drive_id_from_url(url: str):
        """Extract the folder id from a Drive folder URL.

        Accepts both `.../drive/folders/<id>` and `.../drive/u/<n>/folders/<id>`
        forms.

        Raises:
            ValueError: if the URL is not a Drive folder URL.
        """
        # Fix: the `u/<n>/` account segment is now optional — previously it was
        # mandatory, rejecting plain `https://drive.google.com/drive/folders/<id>`.
        regex = r"^https:\/\/drive\.google\.com\/drive\/(?:u\/\d+\/)?folders\/([a-zA-Z0-9_-]+)$"
        if re.match(regex, url):
            return url.split("/")[-1]
        raise ValueError(
            f"The url provided {url} does not match a google drive folder url. Example drive url: "
            f"https://drive.google.com/drive/u/0/folders/xxxx"
        )

    def load_data(self, url: str):
        """Load data from a Google drive folder."""
        folder_id: str = self._get_drive_id_from_url(url)
        try:
            loader = Loader(
                folder_id=folder_id,
                recursive=True,
                file_loader_cls=UnstructuredFileIOLoader,
            )
            data = []
            all_content = []

            docs = loader.load()
            for doc in docs:
                all_content.append(doc.page_content)
                # renames source to url for later use.
                doc.metadata["url"] = doc.metadata.pop("source")
                data.append({"content": doc.page_content, "meta_data": doc.metadata})

            doc_id = hashlib.sha256((" ".join(all_content) + url).encode()).hexdigest()
            return {"doc_id": doc_id, "data": data}
        except HttpError:
            raise FileNotFoundError("Unable to locate folder or files, check provided drive URL and try again")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/gmail.py | embedchain/embedchain/loaders/gmail.py | import base64
import hashlib
import logging
import os
from email import message_from_bytes
from email.utils import parsedate_to_datetime
from textwrap import dedent
from typing import Optional
from bs4 import BeautifulSoup
try:
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
'Gmail requires extra dependencies. Install with `pip install --upgrade "embedchain[gmail]"`'
) from None
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
class GmailReader:
    """Thin wrapper over the Gmail API: authenticates, searches, and parses emails."""

    # Read-only scope: this reader never modifies the mailbox.
    SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]

    def __init__(self, query: str, service=None, results_per_page: int = 10):
        # `query` uses Gmail's search syntax (e.g. "from:x@y.com").
        self.query = query
        self.service = service or self._initialize_service()
        self.results_per_page = results_per_page

    @staticmethod
    def _initialize_service():
        """Build an authenticated Gmail API client."""
        credentials = GmailReader._get_credentials()
        return build("gmail", "v1", credentials=credentials)

    @staticmethod
    def _get_credentials():
        """Load cached OAuth credentials, refreshing or re-running the flow as needed.

        Requires `credentials.json` in the working directory; the obtained token
        is cached in `token.json`.
        """
        if not os.path.exists("credentials.json"):
            raise FileNotFoundError("Missing 'credentials.json'. Download it from your Google Developer account.")

        creds = (
            Credentials.from_authorized_user_file("token.json", GmailReader.SCOPES)
            if os.path.exists("token.json")
            else None
        )

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                # No (valid) cached token: run the interactive browser flow.
                flow = InstalledAppFlow.from_client_secrets_file("credentials.json", GmailReader.SCOPES)
                creds = flow.run_local_server(port=8080)
            with open("token.json", "w") as token:
                token.write(creds.to_json())
        return creds

    def load_emails(self) -> list[dict]:
        """Search the mailbox and return each matching message parsed into a dict."""
        # NOTE(review): `results_per_page` is not passed to list(); the page size
        # is the API default — confirm whether it should be wired to maxResults.
        response = self.service.users().messages().list(userId="me", q=self.query).execute()
        messages = response.get("messages", [])
        return [self._parse_email(self._get_email(message["id"])) for message in messages]

    def _get_email(self, message_id: str):
        # Fetch the raw message and decode it from base64url to bytes.
        raw_message = self.service.users().messages().get(userId="me", id=message_id, format="raw").execute()
        return base64.urlsafe_b64decode(raw_message["raw"])

    def _parse_email(self, raw_email) -> dict:
        """Parse raw email bytes into {subject, from, to, date, body}."""
        mime_msg = message_from_bytes(raw_email)
        return {
            "subject": self._get_header(mime_msg, "Subject"),
            "from": self._get_header(mime_msg, "From"),
            "to": self._get_header(mime_msg, "To"),
            "date": self._format_date(mime_msg),
            "body": self._get_body(mime_msg),
        }

    @staticmethod
    def _get_header(mime_msg, header_name: str) -> str:
        # Missing headers come back as "" rather than None.
        return mime_msg.get(header_name, "")

    @staticmethod
    def _format_date(mime_msg) -> Optional[str]:
        """Return the Date header as an ISO-8601 string, or None when absent."""
        date_header = GmailReader._get_header(mime_msg, "Date")
        return parsedate_to_datetime(date_header).isoformat() if date_header else None

    @staticmethod
    def _get_body(mime_msg) -> str:
        """Return the first non-attachment text/plain or text/html part (walk order)."""

        def decode_payload(part):
            charset = part.get_content_charset() or "utf-8"
            try:
                return part.get_payload(decode=True).decode(charset)
            except UnicodeDecodeError:
                # Tolerate mislabeled charsets rather than failing the whole email.
                return part.get_payload(decode=True).decode(charset, errors="replace")

        if mime_msg.is_multipart():
            for part in mime_msg.walk():
                ctype = part.get_content_type()
                cdispo = str(part.get("Content-Disposition"))
                # Skip attachments; return the first matching text part found.
                if ctype == "text/plain" and "attachment" not in cdispo:
                    return decode_payload(part)
                elif ctype == "text/html":
                    return decode_payload(part)
        else:
            return decode_payload(mime_msg)
        return ""
class GmailLoader(BaseLoader):
    """Loader that ingests Gmail messages matching a Gmail search query."""
    def load_data(self, query: str):
        """Fetch all emails for *query* and return them as embedchain documents."""
        emails = GmailReader(query=query).load_emails()
        logger.info(f"Gmail Loader: {len(emails)} emails found for query '{query}'")
        data = [{"content": self._process_email(email), "meta_data": email} for email in emails]
        return {"doc_id": self._generate_doc_id(query, data), "data": data}
    @staticmethod
    def _process_email(email: dict) -> str:
        """Strip HTML from the email body and render a readable summary block."""
        body_text = clean_string(BeautifulSoup(email["body"], "html.parser").get_text())
        return dedent(
            f"""
            Email from '{email['from']}' to '{email['to']}'
            Subject: {email['subject']}
            Date: {email['date']}
            Content: {body_text}
            """
        )
    @staticmethod
    def _generate_doc_id(query: str, data: list[dict]) -> str:
        """Derive a deterministic document id from the query plus all email contents."""
        combined = query + ", ".join(entry["content"] for entry in data)
        return hashlib.sha256(combined.encode()).hexdigest()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/github.py | embedchain/embedchain/loaders/github.py | import concurrent.futures
import hashlib
import logging
import re
import shlex
from typing import Any, Optional
from tqdm import tqdm
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
# Base URLs for the GitHub website and REST API.
GITHUB_URL = "https://github.com"
GITHUB_API_URL = "https://api.github.com"
# Search types accepted in a `type:` query term (see GithubLoader._get_valid_github_query).
VALID_SEARCH_TYPES = set(["code", "repo", "pr", "issue", "discussion", "branch", "file"])
class GithubLoader(BaseLoader):
    """Load data from a GitHub search query (code, repos, issues, PRs, branches, files)."""

    def __init__(self, config: Optional[dict[str, Any]] = None):
        """Create a PyGithub client from the personal access token in ``config["token"]``.

        Raises:
            ValueError: if the config or token is missing, or PyGithub is not installed.
        """
        super().__init__()
        if not config:
            raise ValueError(
                "GithubLoader requires a personal access token to use github api. Check - `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic`"  # noqa: E501
            )
        try:
            from github import Github
        except ImportError as e:
            raise ValueError(
                "GithubLoader requires extra dependencies. \
                Install with `pip install gitpython==3.1.38 PyGithub==1.59.1`"
            ) from e
        self.config = config
        token = config.get("token")
        if not token:
            raise ValueError(
                "GithubLoader requires a personal access token to use github api. Check - `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic`"  # noqa: E501
            )
        try:
            self.client = Github(token)
        except Exception as e:
            # Leave the client as None; load_data() raises a clear error later.
            logging.error(f"GithubLoader failed to initialize client: {e}")
            self.client = None

    def _github_search_code(self, query: str):
        """Search GitHub code and return one document per matching file."""
        data = []
        results = self.client.search_code(query)
        for result in tqdm(results, total=results.totalCount, desc="Loading code files from github"):
            url = result.html_url
            logging.info(f"Added data from url: {url}")
            content = result.decoded_content.decode("utf-8")
            metadata = {
                "url": url,
            }
            data.append(
                {
                    "content": clean_string(content),
                    "meta_data": metadata,
                }
            )
        return data

    def _get_github_repo_data(self, repo_name: str, branch_name: str = None, file_path: str = None) -> list[dict]:
        """Get file contents from a repo, optionally limited to a branch or single file."""
        data = []
        repo = self.client.get_repo(repo_name)
        repo_contents = repo.get_contents("")
        if branch_name:
            repo_contents = repo.get_contents("", ref=branch_name)
        if file_path:
            repo_contents = [repo.get_contents(file_path)]
        with tqdm(desc="Loading files:", unit="item") as progress_bar:
            # Breadth-first traversal: directories push their children onto the queue.
            while repo_contents:
                file_content = repo_contents.pop(0)
                if file_content.type == "dir":
                    try:
                        repo_contents.extend(repo.get_contents(file_content.path))
                    except Exception:
                        logging.warning(f"Failed to read directory: {file_content.path}")
                        progress_bar.update(1)
                        continue
                else:
                    try:
                        file_text = file_content.decoded_content.decode()
                    except Exception:
                        # Binary or undecodable file — skip it.
                        logging.warning(f"Failed to read file: {file_content.path}")
                        progress_bar.update(1)
                        continue
                    # Use a distinct local name to avoid shadowing the `file_path` parameter.
                    content_path = file_content.path
                    data.append(
                        {
                            "content": clean_string(file_text),
                            "meta_data": {
                                "path": content_path,
                            },
                        }
                    )
                progress_bar.update(1)
        return data

    def _github_search_repo(self, query: str) -> list[dict]:
        """Search GitHub repo: load all files of the repo named after `repo:`."""
        logging.info(f"Searching github repos with query: {query}")
        # Strip the "repo:" prefix to get the bare owner/name.
        updated_query = query.split(":")[-1]
        data = self._get_github_repo_data(updated_query)
        return data

    def _github_search_issues_and_pr(self, query: str, type: str) -> list[dict]:
        """Search GitHub issues or PRs (``type`` is "issue" or "pr")."""
        data = []
        query = f"{query} is:{type}"
        logging.info(f"Searching github for query: {query}")
        results = self.client.search_issues(query)
        logging.info(f"Total results: {results.totalCount}")
        for result in tqdm(results, total=results.totalCount, desc=f"Loading {type} from github"):
            url = result.html_url
            title = result.title
            body = result.body
            if not body:
                logging.warning(f"Skipping issue because empty content for: {url}")
                continue
            labels = " ".join([label.name for label in result.labels])
            issue_comments = result.get_comments()
            comments = []
            comments_created_at = []
            for comment in issue_comments:
                comments_created_at.append(str(comment.created_at))
                comments.append(f"{comment.user.name}:{comment.body}")
            content = "\n".join([title, labels, body, *comments])
            metadata = {
                "url": url,
                "created_at": str(result.created_at),
                "comments_created_at": " ".join(comments_created_at),
            }
            data.append(
                {
                    "content": clean_string(content),
                    "meta_data": metadata,
                }
            )
        return data

    # need to test more for discussion
    # NOTE: unreachable via load_data — _search_github_data raises for "discussion".
    def _github_search_discussions(self, query: str):
        """Search GitHub discussions (experimental; comment bodies are not yet fetched)."""
        data = []
        query = f"{query} is:discussion"
        logging.info(f"Searching github repo for query: {query}")
        repos_results = self.client.search_repositories(query)
        logging.info(f"Total repos found: {repos_results.totalCount}")
        for repo_result in tqdm(repos_results, total=repos_results.totalCount, desc="Loading discussions from github"):
            teams = repo_result.get_teams()
            for team in teams:
                team_discussions = team.get_discussions()
                for discussion in team_discussions:
                    url = discussion.html_url
                    title = discussion.title
                    body = discussion.body
                    if not body:
                        logging.warning(f"Skipping discussion because empty content for: {url}")
                        continue
                    comments = []
                    comments_created_at = []
                    print("Discussion comments: ", discussion.comments_url)
                    content = "\n".join([title, body, *comments])
                    metadata = {
                        "url": url,
                        "created_at": str(discussion.created_at),
                        "comments_created_at": " ".join(comments_created_at),
                    }
                    data.append(
                        {
                            "content": clean_string(content),
                            "meta_data": metadata,
                        }
                    )
        return data

    def _get_github_repo_branch(self, query: str, type: str) -> list[dict]:
        """Get file contents for a specific branch.

        The query must contain `repo:<owner>/<repo> name:<branch>`.
        """
        logging.info(f"Searching github repo for query: {query} is:{type}")
        pattern = r"repo:(\S+) name:(\S+)"
        match = re.search(pattern, query)
        if not match:
            # Bug fix: the original message referenced `repo_name`/`branch_name`,
            # which are unbound on this path and raised NameError instead.
            raise ValueError(
                f"Repository name and Branch name not found in query: {query}. "
                "Expected format `repo:<owner>/<repo> name:<branch>`"
            )
        repo_name = match.group(1)
        branch_name = match.group(2)
        data = self._get_github_repo_data(repo_name=repo_name, branch_name=branch_name)
        return data

    def _get_github_repo_file(self, query: str, type: str) -> list[dict]:
        """Get a specific file's content.

        The query must contain `repo:<owner>/<repo> path:<file_path>`.
        """
        logging.info(f"Searching github repo for query: {query} is:{type}")
        pattern = r"repo:(\S+) path:(\S+)"
        match = re.search(pattern, query)
        if not match:
            # Bug fix: the original message referenced `repo_name`/`file_path`,
            # which are unbound on this path and raised NameError instead.
            raise ValueError(
                f"Repository name and File name not found in query: {query}. "
                "Expected format `repo:<owner>/<repo> path:<file_path>`"
            )
        repo_name = match.group(1)
        file_path = match.group(2)
        data = self._get_github_repo_data(repo_name=repo_name, file_path=file_path)
        return data

    def _search_github_data(self, search_type: str, query: str):
        """Dispatch a search by type; raises for unsupported types."""
        if search_type == "code":
            data = self._github_search_code(query)
        elif search_type == "repo":
            data = self._github_search_repo(query)
        elif search_type == "issue":
            data = self._github_search_issues_and_pr(query, search_type)
        elif search_type == "pr":
            data = self._github_search_issues_and_pr(query, search_type)
        elif search_type == "branch":
            data = self._get_github_repo_branch(query, search_type)
        elif search_type == "file":
            data = self._get_github_repo_file(query, search_type)
        elif search_type == "discussion":
            raise ValueError("GithubLoader does not support searching discussions yet.")
        else:
            raise NotImplementedError(f"{search_type} not supported")
        return data

    @staticmethod
    def _get_valid_github_query(query: str):
        """Check if query is valid and return (search types, GitHub query without type terms)."""
        query_terms = shlex.split(query)
        # query must provide repo to load data from
        if len(query_terms) < 1 or "repo:" not in query:
            raise ValueError(
                "GithubLoader requires a search query with `repo:` term. Refer docs - `https://docs.embedchain.ai/data-sources/github`"  # noqa: E501
            )
        github_query = []
        types = set()
        type_pattern = r"type:([a-zA-Z,]+)"
        for term in query_terms:
            term_match = re.search(type_pattern, term)
            if term_match:
                types.update(term_match.group(1).split(","))
            else:
                github_query.append(term)
        # query must provide search type
        if len(types) == 0:
            raise ValueError(
                "GithubLoader requires a search query with `type:` term. Refer docs - `https://docs.embedchain.ai/data-sources/github`"  # noqa: E501
            )
        # Bug fix: validate every collected type; the original iterated
        # `search_types` (only the last matched `type:` term's values).
        for search_type in types:
            if search_type not in VALID_SEARCH_TYPES:
                raise ValueError(
                    f"Invalid search type: {search_type}. Valid types are: {', '.join(VALID_SEARCH_TYPES)}"
                )
        query = " ".join(github_query)
        return types, query

    def load_data(self, search_query: str, max_results: int = 1000):
        """Load data from a GitHub search query, fanning out one thread per search type.

        NOTE: ``max_results`` is currently unused — kept for interface compatibility.
        """
        if not self.client:
            raise ValueError(
                "GithubLoader client is not initialized, data will not be loaded. Refer docs - `https://docs.embedchain.ai/data-sources/github`"  # noqa: E501
            )
        search_types, query = self._get_valid_github_query(search_query)
        logging.info(f"Searching github for query: {query}, with types: {', '.join(search_types)}")
        data = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
            futures_map = executor.map(self._search_github_data, search_types, [query] * len(search_types))
            for search_data in tqdm(futures_map, total=len(search_types), desc="Searching data from github"):
                data.extend(search_data)
        return {
            "doc_id": hashlib.sha256(query.encode()).hexdigest(),
            "data": data,
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/local_text.py | embedchain/embedchain/loaders/local_text.py | import hashlib
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
@register_deserializable
class LocalTextLoader(BaseLoader):
    """Loader for raw text supplied directly by the caller."""
    def load_data(self, content):
        """Wrap the given text into the standard embedchain document format."""
        source = "local"
        record = {
            "content": content,
            "meta_data": {"url": source},
        }
        # Deterministic id derived from the text plus its (constant) source tag.
        doc_id = hashlib.sha256((content + source).encode()).hexdigest()
        return {
            "doc_id": doc_id,
            "data": [record],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/notion.py | embedchain/embedchain/loaders/notion.py | import hashlib
import logging
import os
from typing import Any, Optional
import requests
from embedchain.helpers.json_serializable import register_deserializable
from embedchain.loaders.base_loader import BaseLoader
from embedchain.utils.misc import clean_string
logger = logging.getLogger(__name__)
class NotionDocument:
    """Lightweight container pairing a Notion page's text with its metadata."""
    def __init__(self, text: str, extra_info: dict[str, Any]):
        # Plain-text content extracted from the page.
        self.text = text
        # Additional information about the page (e.g. its page_id).
        self.extra_info = extra_info
class NotionPageLoader:
"""
Notion Page Loader.
Reads a set of Notion pages.
"""
BLOCK_CHILD_URL_TMPL = "https://api.notion.com/v1/blocks/{block_id}/children"
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with Notion integration token."""
if integration_token is None:
integration_token = os.getenv("NOTION_INTEGRATION_TOKEN")
if integration_token is None:
raise ValueError(
"Must specify `integration_token` or set environment " "variable `NOTION_INTEGRATION_TOKEN`."
)
self.token = integration_token
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block from Notion."""
done = False
result_lines_arr = []
cur_block_id = block_id
while not done:
block_url = self.BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
res = requests.get(block_url, headers=self.headers)
data = res.json()
for result in data["results"]:
result_type = result["type"]
result_obj = result[result_type]
cur_result_text_arr = []
if "rich_text" in result_obj:
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
text = rich_text["text"]["content"]
prefix = "\t" * num_tabs
cur_result_text_arr.append(prefix + text)
result_block_id = result["id"]
has_children = result["has_children"]
if has_children:
children_text = self._read_block(result_block_id, num_tabs=num_tabs + 1)
cur_result_text_arr.append(children_text)
cur_result_text = "\n".join(cur_result_text_arr)
result_lines_arr.append(cur_result_text)
if data["next_cursor"] is None:
done = True
else:
cur_block_id = data["next_cursor"]
result_lines = "\n".join(result_lines_arr)
return result_lines
def load_data(self, page_ids: list[str]) -> list[NotionDocument]:
"""Load data from the given list of page IDs."""
docs = []
for page_id in page_ids:
page_text = self._read_block(page_id)
docs.append(NotionDocument(text=page_text, extra_info={"page_id": page_id}))
return docs
@register_deserializable
class NotionLoader(BaseLoader):
    """Loader for a single Notion page referenced by its URL."""
    def load_data(self, source):
        """Load and clean the text of the Notion page whose id ends *source*."""
        # The last 32 characters of a Notion URL are the undashed page id.
        raw_id = source[-32:]
        formatted_id = "-".join([raw_id[:8], raw_id[8:12], raw_id[12:16], raw_id[16:20], raw_id[20:]])
        logger.debug(f"Extracted notion page id as: {formatted_id}")
        reader = NotionPageLoader(integration_token=os.getenv("NOTION_INTEGRATION_TOKEN"))
        documents = reader.load_data(page_ids=[formatted_id])
        text = clean_string(documents[0].text)
        return {
            "doc_id": hashlib.sha256((text + source).encode()).hexdigest(),
            "data": [
                {
                    "content": text,
                    "meta_data": {"url": f"notion-{formatted_id}"},
                }
            ],
        }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/csv.py | embedchain/embedchain/loaders/csv.py | import csv
import hashlib
from io import StringIO
from urllib.parse import urlparse
import requests
from embedchain.loaders.base_loader import BaseLoader
class CsvLoader(BaseLoader):
    """Loader for delimiter-separated files given as a path or URL."""
    @staticmethod
    def _detect_delimiter(first_line):
        """Guess the delimiter by counting candidate characters in the header line."""
        candidates = (",", "\t", ";", "|")
        # max() returns the first candidate with the highest count.
        return max(candidates, key=first_line.count)
    @staticmethod
    def _get_file_content(content):
        """Open *content* as a text stream; supports http(s), file:// URIs and plain paths."""
        parsed = urlparse(content)
        if all([parsed.scheme, parsed.netloc]) and parsed.scheme not in ["file", "http", "https"]:
            raise ValueError("Not a valid URL.")
        if parsed.scheme in ("http", "https"):
            response = requests.get(content)
            response.raise_for_status()
            return StringIO(response.text)
        if parsed.scheme == "file":
            # Open the file using the path portion of the URI.
            return open(parsed.path, newline="", encoding="utf-8")
        # Treat content as a regular file path.
        return open(content, newline="", encoding="utf-8")
    @staticmethod
    def load_data(content):
        """Load a csv file with headers. Each line is a document."""
        rows = []
        lines = []
        with CsvLoader._get_file_content(content) as file:
            delimiter = CsvLoader._detect_delimiter(file.readline())
            file.seek(0)  # Rewind so DictReader sees the header row again.
            for row_number, row in enumerate(csv.DictReader(file, delimiter=delimiter), start=1):
                line = ", ".join(f"{field}: {value}" for field, value in row.items())
                lines.append(line)
                rows.append({"content": line, "meta_data": {"url": content, "row": row_number}})
        doc_id = hashlib.sha256((content + " ".join(lines)).encode()).hexdigest()
        return {"doc_id": doc_id, "data": rows}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/embedchain/loaders/openapi.py | embedchain/embedchain/loaders/openapi.py | import hashlib
from io import StringIO
from urllib.parse import urlparse
import requests
import yaml
from embedchain.loaders.base_loader import BaseLoader
class OpenAPILoader(BaseLoader):
    """Loader for OpenAPI definitions in YAML form."""
    @staticmethod
    def _get_file_content(content):
        """Open *content* as a text stream; supports http(s), file:// URIs and plain paths."""
        parsed = urlparse(content)
        if all([parsed.scheme, parsed.netloc]) and parsed.scheme not in ["file", "http", "https"]:
            raise ValueError("Not a valid URL.")
        if parsed.scheme in ("http", "https"):
            response = requests.get(content)
            response.raise_for_status()
            return StringIO(response.text)
        if parsed.scheme == "file":
            # Open via the path portion of the file:// URI.
            return open(parsed.path)
        # Treat content as a regular file path.
        return open(content)
    @staticmethod
    def load_data(content):
        """Load yaml file of openapi. Each top-level key/value pair is a document."""
        documents = []
        fragments = []
        with OpenAPILoader._get_file_content(content=content) as file:
            spec = yaml.load(file, Loader=yaml.SafeLoader)
        for row_number, (key, value) in enumerate(spec.items(), start=1):
            fragment = f"{key}: {value}"
            fragments.append(fragment)
            documents.append({"content": fragment, "meta_data": {"url": content, "row": row_number}})
        doc_id = hashlib.sha256((content + ", ".join(fragments)).encode()).hexdigest()
        return {"doc_id": doc_id, "data": documents}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/test_app.py | embedchain/tests/test_app.py | import os
import pytest
import yaml
from embedchain import App
from embedchain.config import ChromaDbConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.llm.base import BaseLlm
from embedchain.vectordb.base import BaseVectorDB
from embedchain.vectordb.chroma import ChromaDB
@pytest.fixture
def app():
    """Provide a default App; dummy OpenAI env vars let it construct without real keys."""
    os.environ["OPENAI_API_KEY"] = "test-api-key"
    os.environ["OPENAI_API_BASE"] = "test-api-base"
    return App()
def test_app(app):
    """A default App wires up concrete LLM, vector DB and embedder components."""
    assert isinstance(app.llm, BaseLlm)
    assert isinstance(app.db, BaseVectorDB)
    assert isinstance(app.embedding_model, BaseEmbedder)
class TestConfigForAppComponents:
    """Component-level configs passed into App should be reachable on the App."""
    def test_constructor_config(self):
        """Config given to the DB constructor is visible via app.db.config."""
        collection_name = "my-test-collection"
        db = ChromaDB(config=ChromaDbConfig(collection_name=collection_name))
        app = App(db=db)
        assert app.db.config.collection_name == collection_name
    def test_component_config(self):
        """Passing a pre-built DB component preserves its config."""
        collection_name = "my-test-collection"
        database = ChromaDB(config=ChromaDbConfig(collection_name=collection_name))
        app = App(db=database)
        assert app.db.config.collection_name == collection_name
class TestAppFromConfig:
    """App.from_config should mirror the values of the YAML config files."""
    def load_config_data(self, yaml_path):
        """Helper: parse a YAML config file into a dict."""
        with open(yaml_path, "r") as file:
            return yaml.safe_load(file)
    def test_from_chroma_config(self, mocker):
        """App built from configs/chroma.yaml reflects app/llm/vectordb/embedder settings."""
        # Avoid creating a real Chroma client during App construction.
        mocker.patch("embedchain.vectordb.chroma.chromadb.Client")
        yaml_path = "configs/chroma.yaml"
        config_data = self.load_config_data(yaml_path)
        app = App.from_config(config_path=yaml_path)
        # Check if the App instance and its components were created correctly
        assert isinstance(app, App)
        # Validate the AppConfig values
        assert app.config.id == config_data["app"]["config"]["id"]
        # Even though not present in the config, the default value is used
        assert app.config.collect_metrics is True
        # Validate the LLM config values
        llm_config = config_data["llm"]["config"]
        assert app.llm.config.temperature == llm_config["temperature"]
        assert app.llm.config.max_tokens == llm_config["max_tokens"]
        assert app.llm.config.top_p == llm_config["top_p"]
        assert app.llm.config.stream == llm_config["stream"]
        # Validate the VectorDB config values
        db_config = config_data["vectordb"]["config"]
        assert app.db.config.collection_name == db_config["collection_name"]
        assert app.db.config.dir == db_config["dir"]
        assert app.db.config.allow_reset == db_config["allow_reset"]
        # Validate the Embedder config values
        embedder_config = config_data["embedder"]["config"]
        assert app.embedding_model.config.model == embedder_config["model"]
        assert app.embedding_model.config.deployment_name == embedder_config.get("deployment_name")
    def test_from_opensource_config(self, mocker):
        """App built from configs/opensource.yaml reflects app/llm/vectordb/embedder settings."""
        # Avoid creating a real Chroma client during App construction.
        mocker.patch("embedchain.vectordb.chroma.chromadb.Client")
        yaml_path = "configs/opensource.yaml"
        config_data = self.load_config_data(yaml_path)
        app = App.from_config(yaml_path)
        # Check if the App instance and its components were created correctly
        assert isinstance(app, App)
        # Validate the AppConfig values
        assert app.config.id == config_data["app"]["config"]["id"]
        assert app.config.collect_metrics == config_data["app"]["config"]["collect_metrics"]
        # Validate the LLM config values
        llm_config = config_data["llm"]["config"]
        assert app.llm.config.model == llm_config["model"]
        assert app.llm.config.temperature == llm_config["temperature"]
        assert app.llm.config.max_tokens == llm_config["max_tokens"]
        assert app.llm.config.top_p == llm_config["top_p"]
        assert app.llm.config.stream == llm_config["stream"]
        # Validate the VectorDB config values
        db_config = config_data["vectordb"]["config"]
        assert app.db.config.collection_name == db_config["collection_name"]
        assert app.db.config.dir == db_config["dir"]
        assert app.db.config.allow_reset == db_config["allow_reset"]
        # Validate the Embedder config values
        embedder_config = config_data["embedder"]["config"]
        assert app.embedding_model.config.deployment_name == embedder_config["deployment_name"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/test_factory.py | embedchain/tests/test_factory.py | import os
import pytest
import embedchain
import embedchain.embedder.gpt4all
import embedchain.embedder.huggingface
import embedchain.embedder.openai
import embedchain.embedder.vertexai
import embedchain.llm.anthropic
import embedchain.llm.openai
import embedchain.vectordb.chroma
import embedchain.vectordb.elasticsearch
import embedchain.vectordb.opensearch
from embedchain.factory import EmbedderFactory, LlmFactory, VectorDBFactory
class TestFactories:
    """Each factory should map a provider name + config dict to the right class."""
    @pytest.mark.parametrize(
        "provider_name, config_data, expected_class",
        [
            ("openai", {}, embedchain.llm.openai.OpenAILlm),
            ("anthropic", {}, embedchain.llm.anthropic.AnthropicLlm),
        ],
    )
    def test_llm_factory_create(self, provider_name, config_data, expected_class):
        """LlmFactory returns the provider-specific LLM class."""
        # Dummy keys so providers that validate credentials can construct.
        os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
        os.environ["OPENAI_API_KEY"] = "test_api_key"
        os.environ["OPENAI_API_BASE"] = "test_api_base"
        llm_instance = LlmFactory.create(provider_name, config_data)
        assert isinstance(llm_instance, expected_class)
    @pytest.mark.parametrize(
        "provider_name, config_data, expected_class",
        [
            ("gpt4all", {}, embedchain.embedder.gpt4all.GPT4AllEmbedder),
            (
                "huggingface",
                {"model": "sentence-transformers/all-mpnet-base-v2", "vector_dimension": 768},
                embedchain.embedder.huggingface.HuggingFaceEmbedder,
            ),
            ("vertexai", {"model": "textembedding-gecko"}, embedchain.embedder.vertexai.VertexAIEmbedder),
            ("openai", {}, embedchain.embedder.openai.OpenAIEmbedder),
        ],
    )
    def test_embedder_factory_create(self, mocker, provider_name, config_data, expected_class):
        """EmbedderFactory returns the provider-specific embedder class."""
        # VertexAI would hit GCP auth on construction — stub it out.
        mocker.patch("embedchain.embedder.vertexai.VertexAIEmbedder", autospec=True)
        embedder_instance = EmbedderFactory.create(provider_name, config_data)
        assert isinstance(embedder_instance, expected_class)
    @pytest.mark.parametrize(
        "provider_name, config_data, expected_class",
        [
            ("chroma", {}, embedchain.vectordb.chroma.ChromaDB),
            (
                "opensearch",
                {"opensearch_url": "http://localhost:9200", "http_auth": ("admin", "admin")},
                embedchain.vectordb.opensearch.OpenSearchDB,
            ),
            ("elasticsearch", {"es_url": "http://localhost:9200"}, embedchain.vectordb.elasticsearch.ElasticsearchDB),
        ],
    )
    def test_vectordb_factory_create(self, mocker, provider_name, config_data, expected_class):
        """VectorDBFactory returns the provider-specific vector DB class."""
        # OpenSearch would open a network connection — stub it out.
        mocker.patch("embedchain.vectordb.opensearch.OpenSearchDB", autospec=True)
        vectordb_instance = VectorDBFactory.create(provider_name, config_data)
        assert isinstance(vectordb_instance, expected_class)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/test_utils.py | embedchain/tests/test_utils.py | import yaml
from embedchain.utils.misc import validate_config
# Every shipped example config that must pass schema validation.
CONFIG_YAMLS = [
    "configs/anthropic.yaml",
    "configs/azure_openai.yaml",
    "configs/chroma.yaml",
    "configs/chunker.yaml",
    "configs/cohere.yaml",
    "configs/together.yaml",
    "configs/ollama.yaml",
    "configs/full-stack.yaml",
    "configs/gpt4.yaml",
    "configs/gpt4all.yaml",
    "configs/huggingface.yaml",
    "configs/jina.yaml",
    "configs/llama2.yaml",
    "configs/opensearch.yaml",
    "configs/opensource.yaml",
    "configs/pinecone.yaml",
    "configs/vertexai.yaml",
    "configs/weaviate.yaml",
]
def test_all_config_yamls():
    """Test that all config yamls are valid."""
    for config_yaml in CONFIG_YAMLS:
        with open(config_yaml, "r") as f:
            config = yaml.safe_load(f)
        assert config is not None
        try:
            validate_config(config)
        except Exception as e:
            # Surface which file failed before re-raising for pytest.
            print(f"Error in {config_yaml}: {e}")
            raise e
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/conftest.py | embedchain/tests/conftest.py | import os
import pytest
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture(autouse=True)
def clean_db():
    """Delete all rows from the local embedchain SQLite DB so every test starts clean."""
    db_path = os.path.expanduser("~/.embedchain/embedchain.db")
    db_url = f"sqlite:///{db_path}"
    engine = create_engine(db_url)
    metadata = MetaData()
    metadata.reflect(bind=engine)  # Reflect schema from the engine
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # Iterate over all tables in reversed order to respect foreign keys
        for table in reversed(metadata.sorted_tables):
            if table.name != "alembic_version":  # Skip the Alembic version table
                session.execute(table.delete())
        session.commit()
    except Exception as e:
        # Best-effort cleanup: roll back and report rather than failing the test run.
        session.rollback()
        print(f"Error cleaning database: {e}")
    finally:
        session.close()
@pytest.fixture(autouse=True)
def disable_telemetry():
    """Force anonymous telemetry off for the duration of each test."""
    os.environ["EC_TELEMETRY"] = "false"
    yield
    # Restore the environment after the test.
    del os.environ["EC_TELEMETRY"]
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/__init__.py | embedchain/tests/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/test_client.py | embedchain/tests/test_client.py | import pytest
from embedchain import Client
class TestClient:
    """Client API-key handling, with the key-validation HTTP call mocked out."""
    @pytest.fixture
    def mock_requests_post(self, mocker):
        """Mock the POST used by Client to validate API keys against the backend."""
        return mocker.patch("embedchain.client.requests.post")
    def test_valid_api_key(self, mock_requests_post):
        """A 200 response means the key validates."""
        mock_requests_post.return_value.status_code = 200
        client = Client(api_key="valid_api_key")
        assert client.check("valid_api_key") is True
    def test_invalid_api_key(self, mock_requests_post):
        """A 401 response makes the constructor reject the key."""
        mock_requests_post.return_value.status_code = 401
        with pytest.raises(ValueError):
            Client(api_key="invalid_api_key")
    def test_update_valid_api_key(self, mock_requests_post):
        """update() replaces the stored key."""
        mock_requests_post.return_value.status_code = 200
        client = Client(api_key="valid_api_key")
        client.update("new_valid_api_key")
        assert client.get() == "new_valid_api_key"
    def test_clear_api_key(self, mock_requests_post):
        """clear() removes the stored key."""
        mock_requests_post.return_value.status_code = 200
        client = Client(api_key="valid_api_key")
        client.clear()
        assert client.get() is None
    def test_save_api_key(self, mock_requests_post):
        """save() persists the key and get() still returns it."""
        mock_requests_post.return_value.status_code = 200
        api_key_to_save = "valid_api_key"
        client = Client(api_key=api_key_to_save)
        client.save()
        assert client.get() == api_key_to_save
    def test_load_api_key_from_config(self, mocker):
        """With no explicit key, the key comes from the stored config."""
        mocker.patch("embedchain.Client.load_config", return_value={"api_key": "test_api_key"})
        client = Client()
        assert client.get() == "test_api_key"
    def test_load_invalid_api_key_from_config(self, mocker):
        """An empty config without a key makes construction fail."""
        mocker.patch("embedchain.Client.load_config", return_value={})
        with pytest.raises(ValueError):
            Client()
    def test_load_missing_api_key_from_config(self, mocker):
        """Same as above: missing key in config raises ValueError."""
        mocker.patch("embedchain.Client.load_config", return_value={})
        with pytest.raises(ValueError):
            Client()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_pinecone.py | embedchain/tests/vectordb/test_pinecone.py | import pytest
from embedchain.config.vector_db.pinecone import PineconeDBConfig
from embedchain.vectordb.pinecone import PineconeDB
@pytest.fixture
def pinecone_pod_config():
    """Pinecone config using a pod-based index spec."""
    return PineconeDBConfig(
        index_name="test_collection",
        api_key="test_api_key",
        vector_dimension=3,
        pod_config={"environment": "test_environment", "metadata_config": {"indexed": ["*"]}},
    )
@pytest.fixture
def pinecone_serverless_config():
    """Pinecone config using a serverless index spec."""
    return PineconeDBConfig(
        index_name="test_collection",
        api_key="test_api_key",
        vector_dimension=3,
        serverless_config={
            "cloud": "test_cloud",
            "region": "test_region",
        },
    )
def test_pinecone_init_without_config(monkeypatch):
    """Without an explicit config, PineconeDB falls back to the default pod config."""
    monkeypatch.setenv("PINECONE_API_KEY", "test_api_key")
    # Stub index setup/creation so no network calls happen.
    monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._setup_pinecone_index", lambda x: x)
    monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._get_or_create_db", lambda x: x)
    pinecone_db = PineconeDB()
    assert isinstance(pinecone_db, PineconeDB)
    assert isinstance(pinecone_db.config, PineconeDBConfig)
    assert pinecone_db.config.pod_config == {"environment": "gcp-starter", "metadata_config": {"indexed": ["*"]}}
    monkeypatch.delenv("PINECONE_API_KEY")
def test_pinecone_init_with_config(pinecone_pod_config, monkeypatch):
    """An explicit config object is stored as-is on the PineconeDB."""
    # Stub index setup/creation so no network calls happen.
    monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._setup_pinecone_index", lambda x: x)
    monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._get_or_create_db", lambda x: x)
    pinecone_db = PineconeDB(config=pinecone_pod_config)
    assert isinstance(pinecone_db, PineconeDB)
    assert isinstance(pinecone_db.config, PineconeDBConfig)
    assert pinecone_db.config.pod_config == pinecone_pod_config.pod_config
    # NOTE(review): this second block re-uses `pinecone_pod_config`, so the
    # serverless assertion compares None == None. It was probably meant to use
    # the `pinecone_serverless_config` fixture — confirm and fix separately.
    pinecone_db = PineconeDB(config=pinecone_pod_config)
    assert isinstance(pinecone_db, PineconeDB)
    assert isinstance(pinecone_db.config, PineconeDBConfig)
    assert pinecone_db.config.serverless_config == pinecone_pod_config.serverless_config
class MockListIndexes:
    """Minimal stub of the object Pinecone's ``list_indexes()`` returns."""

    def names(self):
        # The test collection always appears to exist already.
        return ["test_collection"]
class MockPineconeIndex:
    """In-memory stand-in for a Pinecone index used by the tests below."""

    def __init__(self, *args, **kwargs):
        # Per-instance store. The original used a class-level ``db = []`` list,
        # which leaked upserted vectors across instances (and therefore across
        # tests that each build a fresh MockPineconeIndex).
        self.db = []

    def upsert(self, chunk, **kwargs):
        """Record the upserted vectors so describe_index_stats() can count them."""
        self.db.extend(chunk)

    def delete(self, *args, **kwargs):
        pass

    def query(self, *args, **kwargs):
        """Return a fixed two-hit response mirroring Pinecone's query payload shape."""
        return {
            "matches": [
                {
                    "metadata": {
                        "key": "value",
                        "text": "text_1",
                    },
                    "score": 0.1,
                },
                {
                    "metadata": {
                        "key": "value",
                        "text": "text_2",
                    },
                    "score": 0.2,
                },
            ]
        }

    def fetch(self, *args, **kwargs):
        """Return a fixed fetch payload keyed by vector id."""
        return {
            "vectors": {
                "key_1": {
                    "metadata": {
                        "source": "1",
                    }
                },
                "key_2": {
                    "metadata": {
                        "source": "2",
                    }
                },
            }
        }

    def describe_index_stats(self, *args, **kwargs):
        """Report how many vectors this instance has stored."""
        return {"total_vector_count": len(self.db)}
class MockPineconeClient:
    """Stub of the Pinecone client covering just the calls the tests exercise."""

    def __init__(self, *args, **kwargs):
        pass

    def list_indexes(self):
        # Pretend the test collection already exists.
        return MockListIndexes()

    def create_index(self, *args, **kwargs):
        pass

    def Index(self, *args, **kwargs):
        # Hand back a fresh in-memory index stub.
        return MockPineconeIndex()

    def delete_index(self, *args, **kwargs):
        pass
class MockPinecone:
    """Stand-in for the ``pinecone`` *module* (installed via monkeypatch.setattr).

    The callables below are looked up on the class itself, never on an
    instance, so they intentionally take no ``self`` parameter; ``*args``
    absorbs whatever positional arguments the caller passes.
    """
    def __init__(*args, **kwargs):
        pass
    def Pinecone(*args, **kwargs):
        # pinecone.Pinecone(...) -> client stub
        return MockPineconeClient()
    def PodSpec(*args, **kwargs):
        pass
    def ServerlessSpec(*args, **kwargs):
        pass
class MockEmbedder:
    """Embedder stub mapping every document to the same 3-dimensional vector."""

    def embedding_fn(self, documents):
        # One constant vector per input document.
        return [[1, 1, 1] for _ in documents]
def test_setup_pinecone_index(pinecone_pod_config, pinecone_serverless_config, monkeypatch):
    """_setup_pinecone_index() should create a client and bind the named index for both config styles."""
    # Replace the real pinecone SDK module with the in-memory mocks above.
    monkeypatch.setattr("embedchain.vectordb.pinecone.pinecone", MockPinecone)
    monkeypatch.setenv("PINECONE_API_KEY", "test_api_key")
    pinecone_db = PineconeDB(config=pinecone_pod_config)
    pinecone_db._setup_pinecone_index()
    assert pinecone_db.client is not None
    assert pinecone_db.config.index_name == "test_collection"
    assert pinecone_db.client.list_indexes().names() == ["test_collection"]
    assert pinecone_db.pinecone_index is not None
    # Same checks for the serverless variant.
    pinecone_db = PineconeDB(config=pinecone_serverless_config)
    pinecone_db._setup_pinecone_index()
    assert pinecone_db.client is not None
    assert pinecone_db.config.index_name == "test_collection"
    assert pinecone_db.client.list_indexes().names() == ["test_collection"]
    assert pinecone_db.pinecone_index is not None
def test_get(monkeypatch):
    """get() should map the mocked fetch() payload into ids + metadatas lists."""
    def mock_pinecone_db():
        # Build a PineconeDB with all external setup stubbed out.
        monkeypatch.setenv("PINECONE_API_KEY", "test_api_key")
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._setup_pinecone_index", lambda x: x)
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._get_or_create_db", lambda x: x)
        db = PineconeDB()
        db.pinecone_index = MockPineconeIndex()
        return db
    pinecone_db = mock_pinecone_db()
    ids = pinecone_db.get(["key_1", "key_2"])
    assert ids == {"ids": ["key_1", "key_2"], "metadatas": [{"source": "1"}, {"source": "2"}]}
def test_add(monkeypatch):
    """add() should upsert documents so that count() reflects the running total."""
    def mock_pinecone_db():
        # Build a PineconeDB with external setup stubbed and mocks injected.
        monkeypatch.setenv("PINECONE_API_KEY", "test_api_key")
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._setup_pinecone_index", lambda x: x)
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._get_or_create_db", lambda x: x)
        db = PineconeDB()
        db.pinecone_index = MockPineconeIndex()
        db._set_embedder(MockEmbedder())
        return db
    pinecone_db = mock_pinecone_db()
    pinecone_db.add(["text_1", "text_2"], [{"key_1": "value_1"}, {"key_2": "value_2"}], ["key_1", "key_2"])
    assert pinecone_db.count() == 2
    # A second add accumulates on top of the first.
    pinecone_db.add(["text_3", "text_4"], [{"key_3": "value_3"}, {"key_4": "value_4"}], ["key_3", "key_4"])
    assert pinecone_db.count() == 4
def test_query(monkeypatch):
    """query() should return plain texts, or (text, metadata+score) tuples with citations=True."""
    def mock_pinecone_db():
        # Build a PineconeDB with external setup stubbed and mocks injected.
        monkeypatch.setenv("PINECONE_API_KEY", "test_api_key")
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._setup_pinecone_index", lambda x: x)
        monkeypatch.setattr("embedchain.vectordb.pinecone.PineconeDB._get_or_create_db", lambda x: x)
        db = PineconeDB()
        db.pinecone_index = MockPineconeIndex()
        db._set_embedder(MockEmbedder())
        return db
    pinecone_db = mock_pinecone_db()
    # without citations
    results = pinecone_db.query(["text_1", "text_2"], n_results=2, where={})
    assert results == ["text_1", "text_2"]
    # with citations
    results = pinecone_db.query(["text_1", "text_2"], n_results=2, where={}, citations=True)
    assert results == [
        ("text_1", {"key": "value", "text": "text_1", "score": 0.1}),
        ("text_2", {"key": "value", "text": "text_2", "score": 0.2}),
    ]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_elasticsearch_db.py | embedchain/tests/vectordb/test_elasticsearch_db.py | import os
import unittest
from unittest.mock import patch
from embedchain import App
from embedchain.config import AppConfig, ElasticsearchDBConfig
from embedchain.embedder.gpt4all import GPT4AllEmbedder
from embedchain.vectordb.elasticsearch import ElasticsearchDB
class TestEsDB(unittest.TestCase):
    """Unit tests for ElasticsearchDB; the Elasticsearch client is always patched out."""

    @patch("embedchain.vectordb.elasticsearch.Elasticsearch")
    def test_setUp(self, mock_client):
        """ElasticsearchDB should hold on to the (mocked) client it constructs."""
        self.db = ElasticsearchDB(config=ElasticsearchDBConfig(es_url="https://localhost:9200"))
        self.vector_dim = 384
        app_config = AppConfig(collect_metrics=False)
        self.app = App(config=app_config, db=self.db)
        # Assert that the Elasticsearch client is stored in the ElasticsearchDB class.
        self.assertEqual(self.db.client, mock_client.return_value)

    @patch("embedchain.vectordb.elasticsearch.Elasticsearch")
    def test_query(self, mock_client):
        """query() should unpack ES hits into texts, optionally with metadata + score citations."""
        self.db = ElasticsearchDB(config=ElasticsearchDBConfig(es_url="https://localhost:9200"))
        app_config = AppConfig(collect_metrics=False)
        self.app = App(config=app_config, db=self.db, embedding_model=GPT4AllEmbedder())
        # Assert that the Elasticsearch client is stored in the ElasticsearchDB class.
        self.assertEqual(self.db.client, mock_client.return_value)
        # Create some dummy data
        documents = ["This is a document.", "This is another document."]
        metadatas = [{"url": "url_1", "doc_id": "doc_id_1"}, {"url": "url_2", "doc_id": "doc_id_2"}]
        ids = ["doc_1", "doc_2"]
        # Add the data to the database.
        self.db.add(documents, metadatas, ids)
        # Canned ES search payload in the shape the real client returns.
        search_response = {
            "hits": {
                "hits": [
                    {
                        "_source": {"text": "This is a document.", "metadata": {"url": "url_1", "doc_id": "doc_id_1"}},
                        "_score": 0.9,
                    },
                    {
                        "_source": {
                            "text": "This is another document.",
                            "metadata": {"url": "url_2", "doc_id": "doc_id_2"},
                        },
                        "_score": 0.8,
                    },
                ]
            }
        }
        # Configure the mock client to return the mocked response.
        mock_client.return_value.search.return_value = search_response
        # Query the database for the documents that are most similar to the query "This is a document".
        query = "This is a document"
        results_without_citations = self.db.query(query, n_results=2, where={})
        expected_results_without_citations = ["This is a document.", "This is another document."]
        self.assertEqual(results_without_citations, expected_results_without_citations)
        # With citations the ES _score is merged into each hit's metadata.
        results_with_citations = self.db.query(query, n_results=2, where={}, citations=True)
        expected_results_with_citations = [
            ("This is a document.", {"url": "url_1", "doc_id": "doc_id_1", "score": 0.9}),
            ("This is another document.", {"url": "url_2", "doc_id": "doc_id_2", "score": 0.8}),
        ]
        self.assertEqual(results_with_citations, expected_results_with_citations)

    def test_init_without_url(self):
        """Constructing without any URL (neither config nor env) should raise AttributeError."""
        # Make sure it's not loaded from env
        try:
            del os.environ["ELASTICSEARCH_URL"]
        except KeyError:
            pass
        # Test if an exception is raised when an invalid es_config is provided
        with self.assertRaises(AttributeError):
            ElasticsearchDB()

    def test_init_with_invalid_es_config(self):
        """A plain dict is not a valid es_config and should raise TypeError."""
        # Test if an exception is raised when an invalid es_config is provided
        with self.assertRaises(TypeError):
            ElasticsearchDB(es_config={"ES_URL": "some_url", "valid es_config": False})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_weaviate.py | embedchain/tests/vectordb/test_weaviate.py | import unittest
from unittest.mock import patch
from embedchain import App
from embedchain.config import AppConfig
from embedchain.config.vector_db.pinecone import PineconeDBConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.vectordb.weaviate import WeaviateDB
def mock_embedding_fn(texts: list[str]) -> list[list[float]]:
    """Mock embedding function: always returns the same two fixed vectors."""
    fixed_vectors = [[1, 2, 3], [4, 5, 6]]
    return fixed_vectors
class TestWeaviateDb(unittest.TestCase):
    """Unit tests for WeaviateDB; the weaviate SDK is patched out in every test."""

    def test_incorrect_config_throws_error(self):
        """Test the init method of the WeaviateDb class throws error for incorrect config"""
        with self.assertRaises(TypeError):
            WeaviateDB(config=PineconeDBConfig())

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_initialize(self, weaviate_mock):
        """Test the init method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_schema_mock = weaviate_client_mock.schema
        # Mock that schema doesn't already exist so that a new schema is created
        weaviate_client_schema_mock.exists.return_value = False
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # The class names embed the vector dimension (1536) set on the embedder.
        expected_class_obj = {
            "classes": [
                {
                    "class": "Embedchain_store_1536",
                    "vectorizer": "none",
                    "properties": [
                        {
                            "name": "identifier",
                            "dataType": ["text"],
                        },
                        {
                            "name": "text",
                            "dataType": ["text"],
                        },
                        {
                            "name": "metadata",
                            "dataType": ["Embedchain_store_1536_metadata"],
                        },
                    ],
                },
                {
                    "class": "Embedchain_store_1536_metadata",
                    "vectorizer": "none",
                    "properties": [
                        {
                            "name": "data_type",
                            "dataType": ["text"],
                        },
                        {
                            "name": "doc_id",
                            "dataType": ["text"],
                        },
                        {
                            "name": "url",
                            "dataType": ["text"],
                        },
                        {
                            "name": "hash",
                            "dataType": ["text"],
                        },
                        {
                            "name": "app_id",
                            "dataType": ["text"],
                        },
                    ],
                },
            ]
        }
        # Assert that the Weaviate client was initialized
        weaviate_mock.Client.assert_called_once()
        self.assertEqual(db.index_name, "Embedchain_store_1536")
        weaviate_client_schema_mock.create.assert_called_once_with(expected_class_obj)

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_get_or_create_db(self, weaviate_mock):
        """Test the _get_or_create_db method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        expected_client = db._get_or_create_db()
        self.assertEqual(expected_client, weaviate_client_mock)

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_add(self, weaviate_mock):
        """Test the add method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_batch_mock = weaviate_client_mock.batch
        weaviate_client_batch_enter_mock = weaviate_client_mock.batch.__enter__.return_value
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        documents = ["This is test document"]
        metadatas = [None]
        ids = ["id_1"]
        db.add(documents, metadatas, ids)
        # Check if the document was added to the database.
        weaviate_client_batch_mock.configure.assert_called_once_with(batch_size=100, timeout_retries=3)
        # NOTE(review): both assertions below check the *metadata* class with the
        # same payload — presumably one of them was meant to target the main
        # "Embedchain_store_1536" class; confirm against WeaviateDB.add().
        weaviate_client_batch_enter_mock.add_data_object.assert_any_call(
            data_object={"text": documents[0]}, class_name="Embedchain_store_1536_metadata", vector=[1, 2, 3]
        )
        weaviate_client_batch_enter_mock.add_data_object.assert_any_call(
            data_object={"text": documents[0]},
            class_name="Embedchain_store_1536_metadata",
            vector=[1, 2, 3],
        )

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_query_without_where(self, weaviate_mock):
        """Test the query method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_query_mock = weaviate_client_mock.query
        weaviate_client_query_get_mock = weaviate_client_query_mock.get.return_value
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # Query for the document.
        db.query(input_query="This is a test document.", n_results=1, where={})
        weaviate_client_query_mock.get.assert_called_once_with("Embedchain_store_1536", ["text"])
        weaviate_client_query_get_mock.with_near_vector.assert_called_once_with({"vector": [1, 2, 3]})

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_query_with_where(self, weaviate_mock):
        """Test the query method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_query_mock = weaviate_client_mock.query
        weaviate_client_query_get_mock = weaviate_client_query_mock.get.return_value
        weaviate_client_query_get_where_mock = weaviate_client_query_get_mock.with_where.return_value
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # Query for the document; the where clause is translated to a Weaviate filter.
        db.query(input_query="This is a test document.", n_results=1, where={"doc_id": "123"})
        weaviate_client_query_mock.get.assert_called_once_with("Embedchain_store_1536", ["text"])
        weaviate_client_query_get_mock.with_where.assert_called_once_with(
            {"operator": "Equal", "path": ["metadata", "Embedchain_store_1536_metadata", "doc_id"], "valueText": "123"}
        )
        weaviate_client_query_get_where_mock.with_near_vector.assert_called_once_with({"vector": [1, 2, 3]})

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_reset(self, weaviate_mock):
        """Test the reset method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_batch_mock = weaviate_client_mock.batch
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # Reset the database.
        db.reset()
        weaviate_client_batch_mock.delete_objects.assert_called_once_with(
            "Embedchain_store_1536", where={"path": ["identifier"], "operator": "Like", "valueText": ".*"}
        )

    @patch("embedchain.vectordb.weaviate.weaviate")
    def test_count(self, weaviate_mock):
        """Test the count method of the WeaviateDb class."""
        weaviate_client_mock = weaviate_mock.Client.return_value
        weaviate_client_query = weaviate_client_mock.query
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Weaviate instance
        db = WeaviateDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # Count delegates to a Weaviate aggregate query on the index class.
        db.count()
        weaviate_client_query.aggregate.assert_called_once_with("Embedchain_store_1536")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_chroma_db.py | embedchain/tests/vectordb/test_chroma_db.py | import os
import shutil
from unittest.mock import patch
import pytest
from chromadb.config import Settings
from embedchain import App
from embedchain.config import AppConfig, ChromaDbConfig
from embedchain.vectordb.chroma import ChromaDB
os.environ["OPENAI_API_KEY"] = "test-api-key"
@pytest.fixture
def chroma_db():
    """ChromaDB instance pointed at a (non-existent) remote server."""
    remote_config = ChromaDbConfig(host="test-host", port="1234")
    return ChromaDB(config=remote_config)
@pytest.fixture
def app_with_settings():
    """App wired to a resettable on-disk Chroma store under ./test-db."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    return App(config=AppConfig(collect_metrics=False), db=db)
@pytest.fixture(scope="session", autouse=True)
def cleanup_db():
    """Remove the on-disk test database once the whole test session is done."""
    yield
    try:
        shutil.rmtree("test-db")
    except OSError as err:
        print("Error: %s - %s." % (err.filename, err.strerror))
@patch("embedchain.vectordb.chroma.chromadb.Client")
def test_chroma_db_init_with_host_and_port(mock_client):
    """Host/port from the config must be forwarded into the chromadb Settings."""
    chroma_db = ChromaDB(config=ChromaDbConfig(host="test-host", port="1234"))  # noqa
    # First positional argument of chromadb.Client(...) is the Settings object.
    called_settings: Settings = mock_client.call_args[0][0]
    assert called_settings.chroma_server_host == "test-host"
    assert called_settings.chroma_server_http_port == "1234"
@patch("embedchain.vectordb.chroma.chromadb.Client")
def test_chroma_db_init_with_basic_auth(mock_client):
    """Auth settings under ``chroma_settings`` must be forwarded verbatim to chromadb."""
    chroma_config = {
        "host": "test-host",
        "port": "1234",
        "chroma_settings": {
            "chroma_client_auth_provider": "chromadb.auth.basic.BasicAuthClientProvider",
            "chroma_client_auth_credentials": "admin:admin",
        },
    }
    ChromaDB(config=ChromaDbConfig(**chroma_config))
    # First positional argument of chromadb.Client(...) is the Settings object.
    called_settings: Settings = mock_client.call_args[0][0]
    assert called_settings.chroma_server_host == "test-host"
    assert called_settings.chroma_server_http_port == "1234"
    assert (
        called_settings.chroma_client_auth_provider == chroma_config["chroma_settings"]["chroma_client_auth_provider"]
    )
    assert (
        called_settings.chroma_client_auth_credentials
        == chroma_config["chroma_settings"]["chroma_client_auth_credentials"]
    )
@patch("embedchain.vectordb.chroma.chromadb.Client")
def test_app_init_with_host_and_port(mock_client):
    """Building an App around a remote ChromaDB must pass host/port through to chromadb."""
    expected_host = "test-host"
    expected_port = "1234"
    db = ChromaDB(config=ChromaDbConfig(host=expected_host, port=expected_port))
    _app = App(config=AppConfig(collect_metrics=False), db=db)
    # First positional argument of chromadb.Client(...) is the Settings object.
    settings: Settings = mock_client.call_args[0][0]
    assert settings.chroma_server_host == expected_host
    assert settings.chroma_server_http_port == expected_port
@patch("embedchain.vectordb.chroma.chromadb.Client")
def test_app_init_with_host_and_port_none(mock_client):
    """Without host/port configured, the chromadb Settings should carry None for both."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    _app = App(config=AppConfig(collect_metrics=False), db=db)
    # First positional argument of chromadb.Client(...) is the Settings object.
    settings: Settings = mock_client.call_args[0][0]
    assert settings.chroma_server_host is None
    assert settings.chroma_server_http_port is None
def test_chroma_db_duplicates_throw_warning(caplog):
    """Adding the same embedding ID twice to ONE collection should log Chroma's duplicate warnings."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app = App(config=AppConfig(collect_metrics=False), db=db)
    app.db.collection.add(embeddings=[[0, 0, 0]], ids=["0"])
    app.db.collection.add(embeddings=[[0, 0, 0]], ids=["0"])
    assert "Insert of existing embedding ID: 0" in caplog.text
    assert "Add of existing embedding ID: 0" in caplog.text
    # Leave the store empty for subsequent tests.
    app.db.reset()
def test_chroma_db_duplicates_collections_no_warning(caplog):
    """The same ID in two DIFFERENT collections is not a duplicate and must not warn."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app = App(config=AppConfig(collect_metrics=False), db=db)
    app.set_collection_name("test_collection_1")
    app.db.collection.add(embeddings=[[0, 0, 0]], ids=["0"])
    app.set_collection_name("test_collection_2")
    app.db.collection.add(embeddings=[[0, 0, 0]], ids=["0"])
    assert "Insert of existing embedding ID: 0" not in caplog.text
    assert "Add of existing embedding ID: 0" not in caplog.text
    # Reset both collections so later tests start clean.
    app.db.reset()
    app.set_collection_name("test_collection_1")
    app.db.reset()
def test_chroma_db_collection_init_with_default_collection():
    """A fresh app should land on the default 'embedchain_store' collection."""
    chroma_config = ChromaDbConfig(allow_reset=True, dir="test-db")
    app = App(config=AppConfig(collect_metrics=False), db=ChromaDB(config=chroma_config))
    assert app.db.collection.name == "embedchain_store"
def test_chroma_db_collection_init_with_custom_collection():
    """set_collection_name() after construction should switch the active collection."""
    chroma_config = ChromaDbConfig(allow_reset=True, dir="test-db")
    app = App(config=AppConfig(collect_metrics=False), db=ChromaDB(config=chroma_config))
    app.set_collection_name(name="test_collection")
    assert app.db.collection.name == "test_collection"
def test_chroma_db_collection_set_collection_name():
    """set_collection_name() with a positional name should be reflected on the collection."""
    chroma_config = ChromaDbConfig(allow_reset=True, dir="test-db")
    app = App(config=AppConfig(collect_metrics=False), db=ChromaDB(config=chroma_config))
    app.set_collection_name("test_collection")
    assert app.db.collection.name == "test_collection"
def test_chroma_db_collection_changes_encapsulated():
    """Switching collections must isolate counts; data reappears when switching back."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app = App(config=AppConfig(collect_metrics=False), db=db)
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 0
    # NOTE(review): a flat [0, 0, 0] embedding is passed here, while other tests
    # in this file nest it as [[0, 0, 0]] — presumably Chroma accepts both forms;
    # confirm against the chromadb client API.
    app.db.collection.add(embeddings=[0, 0, 0], ids=["0"])
    assert app.db.count() == 1
    app.set_collection_name("test_collection_2")
    assert app.db.count() == 0
    app.db.collection.add(embeddings=[0, 0, 0], ids=["0"])
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 1
    app.db.reset()
    app.set_collection_name("test_collection_2")
    app.db.reset()
def test_chroma_db_collection_collections_are_persistent():
    """Data written through one App must survive into a freshly constructed App on the same dir."""
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app = App(config=AppConfig(collect_metrics=False), db=db)
    app.set_collection_name("test_collection_1")
    app.db.collection.add(embeddings=[[0, 0, 0]], ids=["0"])
    del app
    # Re-open the same on-disk store with a brand-new App instance.
    db = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app = App(config=AppConfig(collect_metrics=False), db=db)
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 1
    app.db.reset()
def test_chroma_db_collection_parallel_collections():
    """Two apps on different collections stay isolated, and switching names swaps their views."""
    db1 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db", collection_name="test_collection_1"))
    app1 = App(
        config=AppConfig(collect_metrics=False),
        db=db1,
    )
    db2 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db", collection_name="test_collection_2"))
    app2 = App(
        config=AppConfig(collect_metrics=False),
        db=db2,
    )
    # cleanup if any previous tests failed or were interrupted
    app1.db.reset()
    app2.db.reset()
    app1.db.collection.add(embeddings=[0, 0, 0], ids=["0"])
    assert app1.db.count() == 1
    assert app2.db.count() == 0
    app1.db.collection.add(embeddings=[[0, 0, 0], [1, 1, 1]], ids=["1", "2"])
    app2.db.collection.add(embeddings=[0, 0, 0], ids=["0"])
    # After swapping names, each app sees the other's data (3 vs 1 entries).
    app1.set_collection_name("test_collection_2")
    assert app1.db.count() == 1
    app2.set_collection_name("test_collection_1")
    assert app2.db.count() == 3
    # cleanup
    app1.db.reset()
    app2.db.reset()
def test_chroma_db_collection_ids_share_collections():
    """Two apps pointed at the SAME collection name see a shared set of entries."""
    db1 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app1 = App(config=AppConfig(collect_metrics=False), db=db1)
    app1.set_collection_name("one_collection")
    db2 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app2 = App(config=AppConfig(collect_metrics=False), db=db2)
    app2.set_collection_name("one_collection")
    app1.db.collection.add(embeddings=[[0, 0, 0], [1, 1, 1]], ids=["0", "1"])
    app2.db.collection.add(embeddings=[0, 0, 0], ids=["2"])
    # Both handles count all three entries written through either app.
    assert app1.db.count() == 3
    assert app2.db.count() == 3
    # cleanup
    app1.db.reset()
    app2.db.reset()
def test_chroma_db_collection_reset():
    """reset() must clear only the app's own collection, leaving siblings untouched."""
    db1 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app1 = App(config=AppConfig(collect_metrics=False), db=db1)
    app1.set_collection_name("one_collection")
    db2 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app2 = App(config=AppConfig(collect_metrics=False), db=db2)
    app2.set_collection_name("two_collection")
    db3 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app3 = App(config=AppConfig(collect_metrics=False), db=db3)
    app3.set_collection_name("three_collection")
    db4 = ChromaDB(config=ChromaDbConfig(allow_reset=True, dir="test-db"))
    app4 = App(config=AppConfig(collect_metrics=False), db=db4)
    app4.set_collection_name("four_collection")
    app1.db.collection.add(embeddings=[0, 0, 0], ids=["1"])
    app2.db.collection.add(embeddings=[0, 0, 0], ids=["2"])
    app3.db.collection.add(embeddings=[0, 0, 0], ids=["3"])
    app4.db.collection.add(embeddings=[0, 0, 0], ids=["4"])
    # Only app1's collection is reset; the other three keep their single entry.
    app1.db.reset()
    assert app1.db.count() == 0
    assert app2.db.count() == 1
    assert app3.db.count() == 1
    assert app4.db.count() == 1
    # cleanup
    app2.db.reset()
    app3.db.reset()
    app4.db.reset()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_qdrant.py | embedchain/tests/vectordb/test_qdrant.py | import unittest
import uuid
from mock import patch
from qdrant_client.http import models
from qdrant_client.http.models import Batch
from embedchain import App
from embedchain.config import AppConfig
from embedchain.config.vector_db.pinecone import PineconeDBConfig
from embedchain.embedder.base import BaseEmbedder
from embedchain.vectordb.qdrant import QdrantDB
def mock_embedding_fn(texts: list[str]) -> list[list[float]]:
    """Mock embedding function: always yields the same pair of fixed vectors."""
    canned = [[1, 2, 3], [4, 5, 6]]
    return canned
class TestQdrantDB(unittest.TestCase):
    """Unit tests for QdrantDB; QdrantClient is patched out in every test."""

    # Deterministic ids consumed by the uuid4 patch in test_add.
    TEST_UUIDS = ["abc", "def", "ghi"]

    def test_incorrect_config_throws_error(self):
        """Test the init method of the Qdrant class throws error for incorrect config"""
        with self.assertRaises(TypeError):
            QdrantDB(config=PineconeDBConfig())

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    def test_initialize(self, qdrant_client_mock):
        """The collection name embeds the vector dimension and existing collections are listed."""
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        self.assertEqual(db.collection_name, "embedchain-store-1536")
        self.assertEqual(db.client, qdrant_client_mock.return_value)
        qdrant_client_mock.return_value.get_collections.assert_called_once()

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    def test_get(self, qdrant_client_mock):
        """get() should return empty ids/metadatas when the scroll result is empty."""
        qdrant_client_mock.return_value.scroll.return_value = ([], None)
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        resp = db.get(ids=[], where={})
        self.assertEqual(resp, {"ids": [], "metadatas": []})
        # Filters do not change the outcome when the store is empty.
        resp2 = db.get(ids=["123", "456"], where={"url": "https://ai.ai"})
        self.assertEqual(resp2, {"ids": [], "metadatas": []})

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    @patch.object(uuid, "uuid4", side_effect=TEST_UUIDS)
    def test_add(self, uuid_mock, qdrant_client_mock):
        """add() should upsert a Batch with identifiers, text payloads and vectors."""
        qdrant_client_mock.return_value.scroll.return_value = ([], None)
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        documents = ["This is a test document.", "This is another test document."]
        metadatas = [{}, {}]
        ids = ["123", "456"]
        db.add(documents, metadatas, ids)
        qdrant_client_mock.return_value.upsert.assert_called_once_with(
            collection_name="embedchain-store-1536",
            points=Batch(
                ids=["123", "456"],
                payloads=[
                    {
                        "identifier": "123",
                        "text": "This is a test document.",
                        "metadata": {"text": "This is a test document."},
                    },
                    {
                        "identifier": "456",
                        "text": "This is another test document.",
                        "metadata": {"text": "This is another test document."},
                    },
                ],
                vectors=[[1, 2, 3], [4, 5, 6]],
            ),
        )

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    def test_query(self, qdrant_client_mock):
        """query() should translate the where-clause into a Qdrant payload filter."""
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        # Query for the document.
        db.query(input_query="This is a test document.", n_results=1, where={"doc_id": "123"})
        qdrant_client_mock.return_value.search.assert_called_once_with(
            collection_name="embedchain-store-1536",
            query_filter=models.Filter(
                must=[
                    models.FieldCondition(
                        key="metadata.doc_id",
                        match=models.MatchValue(
                            value="123",
                        ),
                    )
                ]
            ),
            query_vector=[1, 2, 3],
            limit=1,
        )

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    def test_count(self, qdrant_client_mock):
        """count() should delegate to get_collection on the named collection."""
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        db.count()
        qdrant_client_mock.return_value.get_collection.assert_called_once_with(collection_name="embedchain-store-1536")

    @patch("embedchain.vectordb.qdrant.QdrantClient")
    def test_reset(self, qdrant_client_mock):
        """reset() should delete the whole named collection."""
        # Set the embedder
        embedder = BaseEmbedder()
        embedder.set_vector_dimension(1536)
        embedder.set_embedding_fn(mock_embedding_fn)
        # Create a Qdrant instance
        db = QdrantDB()
        app_config = AppConfig(collect_metrics=False)
        App(config=app_config, db=db, embedding_model=embedder)
        db.reset()
        qdrant_client_mock.return_value.delete_collection.assert_called_once_with(
            collection_name="embedchain-store-1536"
        )
if __name__ == "__main__":
    # Allow running this test module directly via the stdlib unittest runner.
    unittest.main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_zilliz_db.py | embedchain/tests/vectordb/test_zilliz_db.py | # ruff: noqa: E501
import os
from unittest import mock
from unittest.mock import Mock, patch
import pytest
from embedchain.config import ZillizDBConfig
from embedchain.vectordb.zilliz import ZillizVectorDB
# to run tests, provide the URI and TOKEN in .env file
class TestZillizVectorDBConfig:
    """Tests for ZillizDBConfig env-var handling (URI and token are read from the environment)."""

    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def test_init_with_uri_and_token(self):
        """
        Test if the `ZillizVectorDBConfig` instance is initialized with the correct uri and token values.
        """
        # Create a ZillizDBConfig instance with mocked values
        expected_uri = "mocked_uri"
        expected_token = "mocked_token"
        db_config = ZillizDBConfig()
        # Assert that the values in the ZillizVectorDB instance match the mocked values
        assert db_config.uri == expected_uri
        assert db_config.token == expected_token

    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def test_init_without_uri(self):
        """
        Test if the `ZillizVectorDBConfig` instance throws an error when no URI found.
        """
        # Remove the patched URI so the config sees no URI at all.
        try:
            del os.environ["ZILLIZ_CLOUD_URI"]
        except KeyError:
            pass
        with pytest.raises(AttributeError):
            ZillizDBConfig()

    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def test_init_without_token(self):
        """
        Test if the `ZillizVectorDBConfig` instance throws an error when no Token found.
        """
        # Remove the patched token so the config sees no token at all.
        try:
            del os.environ["ZILLIZ_CLOUD_TOKEN"]
        except KeyError:
            pass
        # Test if an exception is raised when ZILLIZ_CLOUD_TOKEN is missing
        with pytest.raises(AttributeError):
            ZillizDBConfig()
class TestZillizVectorDB:
    """Tests for ZillizVectorDB construction with the Milvus client patched out."""

    @pytest.fixture
    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def mock_config(self, mocker):
        # A Mock constrained to the real ZillizDBConfig interface.
        return mocker.Mock(spec=ZillizDBConfig())

    @patch("embedchain.vectordb.zilliz.MilvusClient", autospec=True)
    @patch("embedchain.vectordb.zilliz.connections.connect", autospec=True)
    def test_zilliz_vector_db_setup(self, mock_connect, mock_client, mock_config):
        """
        Test if the `ZillizVectorDB` instance is initialized with the correct uri and token values.
        """
        # Create an instance of ZillizVectorDB with the mock config
        # zilliz_db = ZillizVectorDB(config=mock_config)
        ZillizVectorDB(config=mock_config)
        # Assert that the MilvusClient and connections.connect were called
        mock_client.assert_called_once_with(uri=mock_config.uri, token=mock_config.token)
        mock_connect.assert_called_once_with(uri=mock_config.uri, token=mock_config.token)
class TestZillizDBCollection:
    """Collection naming and query behaviour of ZillizVectorDB."""

    @pytest.fixture
    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def mock_config(self, mocker):
        # Spec'd mock: attribute access is limited to the real config surface.
        return mocker.Mock(spec=ZillizDBConfig())

    @pytest.fixture
    def mock_embedder(self, mocker):
        # Bare mock standing in for the embedding backend.
        return mocker.Mock()

    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def test_init_with_default_collection(self):
        """
        Test if the `ZillizVectorDB` instance is initialized with the correct default collection name.
        """
        # Create a ZillizDBConfig instance
        db_config = ZillizDBConfig()
        assert db_config.collection_name == "embedchain_store"

    @mock.patch.dict(os.environ, {"ZILLIZ_CLOUD_URI": "mocked_uri", "ZILLIZ_CLOUD_TOKEN": "mocked_token"})
    def test_init_with_custom_collection(self):
        """
        Test if the `ZillizVectorDB` instance is initialized with the correct custom collection name.
        """
        # Create a ZillizDBConfig instance with mocked values
        expected_collection = "test_collection"
        db_config = ZillizDBConfig(collection_name="test_collection")
        assert db_config.collection_name == expected_collection

    @patch("embedchain.vectordb.zilliz.MilvusClient", autospec=True)
    @patch("embedchain.vectordb.zilliz.connections", autospec=True)
    def test_query(self, mock_connect, mock_client, mock_embedder, mock_config):
        """query() delegates to MilvusClient.search and unpacks the hits;
        with citations=True each hit also carries its metadata and score."""
        # Create an instance of ZillizVectorDB with mock config
        zilliz_db = ZillizVectorDB(config=mock_config)
        # Add a 'embedder' attribute to the ZillizVectorDB instance for testing
        zilliz_db.embedder = mock_embedder  # Mock the 'collection' object
        # Add a 'collection' attribute to the ZillizVectorDB instance for testing
        zilliz_db.collection = Mock(is_empty=False)  # Mock the 'collection' object
        assert zilliz_db.client == mock_client()
        # Mock the MilvusClient search method
        with patch.object(zilliz_db.client, "search") as mock_search:
            # Mock the embedding function
            mock_embedder.embedding_fn.return_value = ["query_vector"]
            # Mock the search result
            mock_search.return_value = [
                [
                    {
                        "distance": 0.0,
                        "entity": {
                            "text": "result_doc",
                            "embeddings": [1, 2, 3],
                            "metadata": {"url": "url_1", "doc_id": "doc_id_1"},
                        },
                    }
                ]
            ]
            query_result = zilliz_db.query(input_query="query_text", n_results=1, where={})
            # Assert that MilvusClient.search was called with the correct parameters
            mock_search.assert_called_with(
                collection_name=mock_config.collection_name,
                data=["query_vector"],
                filter="",
                limit=1,
                output_fields=["*"],
            )
            # Assert that the query result matches the expected result
            assert query_result == ["result_doc"]
            # Same query again, now requesting citations.
            query_result_with_citations = zilliz_db.query(
                input_query="query_text", n_results=1, where={}, citations=True
            )
            mock_search.assert_called_with(
                collection_name=mock_config.collection_name,
                data=["query_vector"],
                filter="",
                limit=1,
                output_fields=["*"],
            )
            assert query_result_with_citations == [("result_doc", {"url": "url_1", "doc_id": "doc_id_1", "score": 0.0})]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/vectordb/test_lancedb.py | embedchain/tests/vectordb/test_lancedb.py | import os
import shutil
import pytest
from embedchain import App
from embedchain.config import AppConfig
from embedchain.config.vector_db.lancedb import LanceDBConfig
from embedchain.vectordb.lancedb import LanceDB
os.environ["OPENAI_API_KEY"] = "test-api-key"
@pytest.fixture
def lancedb():
    # Plain LanceDB instance pointed at a throwaway directory/collection.
    return LanceDB(config=LanceDBConfig(dir="test-db", collection_name="test-coll"))
@pytest.fixture
def app_with_settings():
    """App wired to a resettable LanceDB in its own on-disk directory."""
    db = LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db-reset"))
    return App(config=AppConfig(collect_metrics=False), db=db)
@pytest.fixture(scope="session", autouse=True)
def cleanup_db():
    """Remove the on-disk LanceDB directories once the test session ends.

    Each directory is removed independently: the original wrapped both
    rmtree calls in a single try-block, so a failure on the first path
    (e.g. it was never created) skipped cleanup of the second one.
    """
    yield
    for path in ("test-db.lance", "test-db-reset.lance"):
        try:
            shutil.rmtree(path)
        except OSError as e:
            print("Error: %s - %s." % (e.filename, e.strerror))
def test_lancedb_duplicates_throw_warning(caplog):
    """Adding the same doc id twice must not log duplicate-id warnings."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    for _ in range(2):
        app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    for needle in ("Insert of existing doc ID: 0", "Add of existing doc ID: 0"):
        assert needle not in caplog.text
    app.db.reset()
def test_lancedb_duplicates_collections_no_warning(caplog):
    """The same id in two different collections is not a duplicate."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name("test_collection_1")
    app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    app.set_collection_name("test_collection_2")
    app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    for needle in ("Insert of existing doc ID: 0", "Add of existing doc ID: 0"):
        assert needle not in caplog.text
    # clear both collections
    app.db.reset()
    app.set_collection_name("test_collection_1")
    app.db.reset()
def test_lancedb_collection_init_with_default_collection():
    """Without an explicit name, the default collection is used."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    assert app.db.collection.name == "embedchain_store"
def test_lancedb_collection_init_with_custom_collection():
    """set_collection_name switches the active collection."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name(name="test_collection")
    assert app.db.collection.name == "test_collection"
def test_lancedb_collection_set_collection_name():
    """The collection object reflects the name set on the app."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name("test_collection")
    assert app.db.collection.name == "test_collection"
def test_lancedb_collection_changes_encapsulated():
    """Counts follow the active collection name, not the App object."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 0
    app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    assert app.db.count() == 1
    # a fresh collection starts out empty
    app.set_collection_name("test_collection_2")
    assert app.db.count() == 0
    app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    # switching back restores the first collection's contents
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 1
    # clear both collections
    app.db.reset()
    app.set_collection_name("test_collection_2")
    app.db.reset()
def test_lancedb_collection_collections_are_persistent():
    """Data written via one App survives into a freshly built App."""
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name("test_collection_1")
    app.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    del app
    # rebuild everything from scratch against the same directory
    app = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app.set_collection_name("test_collection_1")
    assert app.db.count() == 1
    app.db.reset()
def test_lancedb_collection_parallel_collections():
    """Two apps with different collections in one dir stay independent."""
    app1 = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db", collection_name="test_collection_1")),
    )
    app2 = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db", collection_name="test_collection_2")),
    )
    # cleanup if any previous tests failed or were interrupted
    app1.db.reset()
    app2.db.reset()
    app1.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    assert app1.db.count() == 1
    assert app2.db.count() == 0
    app1.db.add(ids=["1", "2"], documents=["doc1", "doc2"], metadatas=["test", "test"])
    app2.db.add(ids=["0"], documents=["doc1"], metadatas=["test"])
    # swapping collection names swaps the visible row counts
    app1.set_collection_name("test_collection_2")
    assert app1.db.count() == 1
    app2.set_collection_name("test_collection_1")
    assert app2.db.count() == 3
    # cleanup
    app1.db.reset()
    app2.db.reset()
def test_lancedb_collection_ids_share_collections():
    """Two App instances pointed at the same collection share its rows."""
    app1 = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app1.set_collection_name("one_collection")
    app2 = App(
        config=AppConfig(collect_metrics=False),
        db=LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db")),
    )
    app2.set_collection_name("one_collection")
    # cleanup leftovers from earlier runs
    app1.db.reset()
    app2.db.reset()
    app1.db.add(ids=["0", "1"], documents=["doc1", "doc2"], metadatas=["test", "test"])
    app2.db.add(ids=["2"], documents=["doc3"], metadatas=["test"])
    # app2 sees its own row plus the two written through app1
    assert app1.db.count() == 2
    assert app2.db.count() == 3
    # cleanup
    app1.db.reset()
    app2.db.reset()
def test_lancedb_collection_reset():
    """reset() clears only the collection it is called on.

    The original built four app/db pairs with four copy-pasted stanzas;
    a loop removes the duplication while keeping the exact same sequence
    of operations per app.
    """
    apps = []
    for name in ("one_collection", "two_collection", "three_collection", "four_collection"):
        db = LanceDB(config=LanceDBConfig(allow_reset=True, dir="test-db"))
        app = App(config=AppConfig(collect_metrics=False), db=db)
        app.set_collection_name(name)
        # cleanup if any previous tests failed or were interrupted
        app.db.reset()
        apps.append(app)
    for i, app in enumerate(apps, start=1):
        app.db.add(ids=[str(i)], documents=[f"doc{i}"], metadatas=["test"])
    # resetting the first collection must leave the other three intact
    apps[0].db.reset()
    assert apps[0].db.count() == 0
    for app in apps[1:]:
        assert app.db.count() == 1
    # cleanup
    for app in apps[1:]:
        app.db.reset()
def generate_embeddings(dummy_embed, embed_size):
    """Return a list containing *dummy_embed* repeated *embed_size* times.

    Sequence repetition replaces the manual append loop; like the original,
    every element is the very same object (no copies are made).
    """
    return [dummy_embed] * embed_size
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/chunkers/test_base_chunker.py | embedchain/tests/chunkers/test_base_chunker.py | import hashlib
from unittest.mock import MagicMock
import pytest
from embedchain.chunkers.base_chunker import BaseChunker
from embedchain.config.add_config import ChunkerConfig
from embedchain.models.data_type import DataType
@pytest.fixture
def text_splitter_mock():
    # Stand-in for the text splitter; tests set split_text.return_value.
    return MagicMock()
@pytest.fixture
def loader_mock():
    # Stand-in for a loader; tests set load_data.return_value.
    return MagicMock()
@pytest.fixture
def app_id():
    # App id used as the prefix of generated chunk/doc ids.
    return "test_app"
@pytest.fixture
def data_type():
    # Data type assigned to the chunker under test.
    return DataType.TEXT
@pytest.fixture
def chunker(text_splitter_mock, data_type):
    """BaseChunker wired to the mocked splitter with the TEXT data type."""
    instance = BaseChunker(text_splitter_mock)
    instance.set_data_type(data_type)
    return instance
def test_create_chunks_with_config(chunker, text_splitter_mock, loader_mock, app_id, data_type):
    """Chunks shorter than min_chunk_size are dropped from the result."""
    text_splitter_mock.split_text.return_value = ["Chunk 1", "long chunk"]
    loader_mock.load_data.return_value = {
        "data": [{"content": "Content 1", "meta_data": {"url": "URL 1"}}],
        "doc_id": "DocID",
    }
    cfg = ChunkerConfig(chunk_size=50, chunk_overlap=0, length_function=len, min_chunk_size=10)
    result = chunker.create_chunks(loader_mock, "test_src", app_id, cfg)
    # only the chunk meeting min_chunk_size survives
    assert result["documents"] == ["long chunk"]
def test_create_chunks(chunker, text_splitter_mock, loader_mock, app_id, data_type):
    """IDs, metadata and doc_id are derived from chunk text, url and app id."""
    text_splitter_mock.split_text.return_value = ["Chunk 1", "Chunk 2"]
    loader_mock.load_data.return_value = {
        "data": [{"content": "Content 1", "meta_data": {"url": "URL 1"}}],
        "doc_id": "DocID",
    }
    result = chunker.create_chunks(loader_mock, "test_src", app_id)
    # ids are "<app_id>--sha256(chunk + url)"
    expected_ids = [
        f"{app_id}--" + hashlib.sha256((chunk + "URL 1").encode()).hexdigest()
        for chunk in ("Chunk 1", "Chunk 2")
    ]
    expected_meta = {
        "url": "URL 1",
        "data_type": data_type.value,
        "doc_id": f"{app_id}--DocID",
    }
    assert result["documents"] == ["Chunk 1", "Chunk 2"]
    assert result["ids"] == expected_ids
    assert result["metadatas"] == [expected_meta, expected_meta]
    assert result["doc_id"] == f"{app_id}--DocID"
def test_get_chunks(chunker, text_splitter_mock):
    """get_chunks delegates to the underlying text splitter."""
    text_splitter_mock.split_text.return_value = ["Chunk 1", "Chunk 2"]
    chunks = chunker.get_chunks("This is a test content.")
    assert len(chunks) == 2
    assert chunks == ["Chunk 1", "Chunk 2"]
def test_set_data_type(chunker):
    # set_data_type overrides the type chosen by the fixture (TEXT -> MDX).
    chunker.set_data_type(DataType.MDX)
    assert chunker.data_type == DataType.MDX
def test_get_word_count(chunker):
    """Word count is summed across all documents."""
    assert chunker.get_word_count(["This is a test.", "Another test."]) == 6
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/chunkers/test_chunkers.py | embedchain/tests/chunkers/test_chunkers.py | from embedchain.chunkers.audio import AudioChunker
from embedchain.chunkers.common_chunker import CommonChunker
from embedchain.chunkers.discourse import DiscourseChunker
from embedchain.chunkers.docs_site import DocsSiteChunker
from embedchain.chunkers.docx_file import DocxFileChunker
from embedchain.chunkers.excel_file import ExcelFileChunker
from embedchain.chunkers.gmail import GmailChunker
from embedchain.chunkers.google_drive import GoogleDriveChunker
from embedchain.chunkers.json import JSONChunker
from embedchain.chunkers.mdx import MdxChunker
from embedchain.chunkers.notion import NotionChunker
from embedchain.chunkers.openapi import OpenAPIChunker
from embedchain.chunkers.pdf_file import PdfFileChunker
from embedchain.chunkers.postgres import PostgresChunker
from embedchain.chunkers.qna_pair import QnaPairChunker
from embedchain.chunkers.sitemap import SitemapChunker
from embedchain.chunkers.slack import SlackChunker
from embedchain.chunkers.table import TableChunker
from embedchain.chunkers.text import TextChunker
from embedchain.chunkers.web_page import WebPageChunker
from embedchain.chunkers.xml import XmlChunker
from embedchain.chunkers.youtube_video import YoutubeVideoChunker
from embedchain.config.add_config import ChunkerConfig
# Override config shared by test_custom_config_values.
chunker_config = ChunkerConfig(chunk_size=500, chunk_overlap=0, length_function=len)
# Expected default splitter settings per chunker class
# (checked by test_default_config_values).
chunker_common_config = {
    DocsSiteChunker: {"chunk_size": 500, "chunk_overlap": 50, "length_function": len},
    DocxFileChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    PdfFileChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    TextChunker: {"chunk_size": 300, "chunk_overlap": 0, "length_function": len},
    MdxChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    NotionChunker: {"chunk_size": 300, "chunk_overlap": 0, "length_function": len},
    QnaPairChunker: {"chunk_size": 300, "chunk_overlap": 0, "length_function": len},
    TableChunker: {"chunk_size": 300, "chunk_overlap": 0, "length_function": len},
    SitemapChunker: {"chunk_size": 500, "chunk_overlap": 0, "length_function": len},
    WebPageChunker: {"chunk_size": 2000, "chunk_overlap": 0, "length_function": len},
    XmlChunker: {"chunk_size": 500, "chunk_overlap": 50, "length_function": len},
    YoutubeVideoChunker: {"chunk_size": 2000, "chunk_overlap": 0, "length_function": len},
    JSONChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    OpenAPIChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    GmailChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    PostgresChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    SlackChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    DiscourseChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    CommonChunker: {"chunk_size": 2000, "chunk_overlap": 0, "length_function": len},
    GoogleDriveChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    ExcelFileChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
    AudioChunker: {"chunk_size": 1000, "chunk_overlap": 0, "length_function": len},
}
def test_default_config_values():
    """Every chunker's default splitter settings match the expected table."""
    for chunker_class, expected in chunker_common_config.items():
        splitter = chunker_class().text_splitter
        assert splitter._chunk_size == expected["chunk_size"]
        assert splitter._chunk_overlap == expected["chunk_overlap"]
        assert splitter._length_function == expected["length_function"]
def test_custom_config_values():
    """An explicit ChunkerConfig overrides every chunker class's defaults."""
    # Iterate keys directly instead of `.items()` with a discarded value:
    # the per-class default settings are irrelevant here.
    for chunker_class in chunker_common_config:
        splitter = chunker_class(config=chunker_config).text_splitter
        assert splitter._chunk_size == 500
        assert splitter._chunk_overlap == 0
        assert splitter._length_function == len
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/chunkers/test_text.py | embedchain/tests/chunkers/test_text.py | # ruff: noqa: E501
from embedchain.chunkers.text import TextChunker
from embedchain.config import ChunkerConfig
from embedchain.models.data_type import DataType
class TestTextChunker:
    """Chunk-size behaviour of TextChunker against a stub loader."""

    @staticmethod
    def _split(text, chunk_size):
        """Chunk *text* with the given size and return the produced documents.

        Mirrors the original call shape exactly, including passing the
        config object as the third positional argument of create_chunks.
        """
        config = ChunkerConfig(chunk_size=chunk_size, chunk_overlap=0, length_function=len, min_chunk_size=0)
        chunker = TextChunker(config=config)
        # Data type must be set manually in the test
        chunker.set_data_type(DataType.TEXT)
        return chunker.create_chunks(MockLoader(), text, config)["documents"]

    def test_chunks_without_app_id(self):
        """
        Test the chunks generated by TextChunker.
        """
        documents = self._split("Lorem ipsum dolor sit amet, consectetur adipiscing elit.", chunk_size=10)
        assert len(documents) > 5

    def test_chunks_with_app_id(self):
        """
        Test the chunks generated by TextChunker with app_id
        """
        documents = self._split("Lorem ipsum dolor sit amet, consectetur adipiscing elit.", chunk_size=10)
        assert len(documents) > 5

    def test_big_chunksize(self):
        """
        Test that if an infinitely high chunk size is used, only one chunk is returned.
        """
        documents = self._split("Lorem ipsum dolor sit amet, consectetur adipiscing elit.", chunk_size=9999999999)
        assert len(documents) == 1

    def test_small_chunksize(self):
        """
        Test that if a chunk size of one is used, every character is a chunk.
        """
        # We can't test with lorem ipsum because chunks are deduped, so would be recurring characters.
        text = """0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c"""
        documents = self._split(text, chunk_size=1)
        assert len(documents) == len(text)

    def test_word_count(self):
        """get_word_count sums whitespace-separated words over all documents."""
        config = ChunkerConfig(chunk_size=1, chunk_overlap=0, length_function=len, min_chunk_size=0)
        chunker = TextChunker(config=config)
        chunker.set_data_type(DataType.TEXT)
        assert chunker.get_word_count(["ab cd", "ef gh"]) == 4
class MockLoader:
    """Minimal loader double that wraps the source string verbatim."""

    @staticmethod
    def load_data(src) -> dict:
        """Return *src* as a single-entry loader payload with a fixed doc id."""
        entry = {"content": src, "meta_data": {"url": "none"}}
        return {"doc_id": "123", "data": [entry]}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedchain/test_add.py | embedchain/tests/embedchain/test_add.py | import os
import pytest
from embedchain import App
from embedchain.config import AddConfig, AppConfig, ChunkerConfig
from embedchain.models.data_type import DataType
os.environ["OPENAI_API_KEY"] = "test_key"
@pytest.fixture
def app(mocker):
    # Patch Chroma's Collection.add so tests never write to a real store.
    mocker.patch("chromadb.api.models.Collection.Collection.add")
    return App(config=AppConfig(collect_metrics=False))
def test_add(app):
    # Without an explicit data_type, the source is detected as "web_page"
    # and the add is recorded in user_asks.
    app.add("https://example.com", metadata={"foo": "bar"})
    assert app.user_asks == [["https://example.com", "web_page", {"foo": "bar"}]]
# TODO: Make this test faster by generating a sitemap locally rather than using a remote one
# def test_add_sitemap(app):
# app.add("https://www.google.com/sitemap.xml", metadata={"foo": "bar"})
# assert app.user_asks == [["https://www.google.com/sitemap.xml", "sitemap", {"foo": "bar"}]]
def test_add_forced_type(app):
    """An explicitly passed data_type wins over auto-detection."""
    app.add("https://example.com", data_type="text", metadata={"foo": "bar"})
    assert app.user_asks == [["https://example.com", "text", {"foo": "bar"}]]
def test_dry_run(app):
    """dry_run reports chunks and metadata without persisting anything."""
    chunker_config = ChunkerConfig(chunk_size=1, chunk_overlap=0, min_chunk_size=0)
    text = """0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""
    result = app.add(source=text, config=AddConfig(chunker=chunker_config), dry_run=True)
    # chunk_size=1 yields one chunk per character of the (dedup-free) text
    assert len(result["chunks"]) == len(text)
    assert result["count"] == len(text)
    assert result["type"] == DataType.TEXT
    for item in result["metadata"]:
        assert isinstance(item, dict)
        assert "local" in item["url"]
        assert "text" in item["data_type"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedchain/test_utils.py | embedchain/tests/embedchain/test_utils.py | import tempfile
import unittest
from unittest.mock import patch
from embedchain.models.data_type import DataType
from embedchain.utils.misc import detect_datatype
class TestApp(unittest.TestCase):
    """Test that the datatype detection is working, based on the input."""

    def test_detect_datatype_youtube(self):
        # Every known YouTube-style host must map to YOUTUBE_VIDEO.
        self.assertEqual(detect_datatype("https://www.youtube.com/watch?v=dQw4w9WgXcQ"), DataType.YOUTUBE_VIDEO)
        self.assertEqual(detect_datatype("https://m.youtube.com/watch?v=dQw4w9WgXcQ"), DataType.YOUTUBE_VIDEO)
        self.assertEqual(
            detect_datatype("https://www.youtube-nocookie.com/watch?v=dQw4w9WgXcQ"), DataType.YOUTUBE_VIDEO
        )
        self.assertEqual(detect_datatype("https://vid.plus/watch?v=dQw4w9WgXcQ"), DataType.YOUTUBE_VIDEO)
        self.assertEqual(detect_datatype("https://youtu.be/dQw4w9WgXcQ"), DataType.YOUTUBE_VIDEO)

    def test_detect_datatype_local_file(self):
        # file:// URLs without a special extension are treated as web pages.
        self.assertEqual(detect_datatype("file:///home/user/file.txt"), DataType.WEB_PAGE)

    def test_detect_datatype_pdf(self):
        self.assertEqual(detect_datatype("https://www.example.com/document.pdf"), DataType.PDF_FILE)

    def test_detect_datatype_local_pdf(self):
        self.assertEqual(detect_datatype("file:///home/user/document.pdf"), DataType.PDF_FILE)

    def test_detect_datatype_xml(self):
        # .xml is assumed to be a sitemap, remote or local.
        self.assertEqual(detect_datatype("https://www.example.com/sitemap.xml"), DataType.SITEMAP)

    def test_detect_datatype_local_xml(self):
        self.assertEqual(detect_datatype("file:///home/user/sitemap.xml"), DataType.SITEMAP)

    def test_detect_datatype_docx(self):
        self.assertEqual(detect_datatype("https://www.example.com/document.docx"), DataType.DOCX)

    def test_detect_datatype_local_docx(self):
        self.assertEqual(detect_datatype("file:///home/user/document.docx"), DataType.DOCX)

    def test_detect_data_type_json(self):
        self.assertEqual(detect_datatype("https://www.example.com/data.json"), DataType.JSON)

    def test_detect_data_type_local_json(self):
        self.assertEqual(detect_datatype("file:///home/user/data.json"), DataType.JSON)

    @patch("os.path.isfile")
    def test_detect_datatype_regular_filesystem_docx(self, mock_isfile):
        # Plain filesystem path (no file:// scheme) still resolves by extension.
        with tempfile.NamedTemporaryFile(suffix=".docx", delete=True) as tmp:
            mock_isfile.return_value = True
            self.assertEqual(detect_datatype(tmp.name), DataType.DOCX)

    def test_detect_datatype_docs_site(self):
        # "docs" as a subdomain marks a documentation site.
        self.assertEqual(detect_datatype("https://docs.example.com"), DataType.DOCS_SITE)

    def test_detect_datatype_docs_sitein_path(self):
        # "docs" in the URL path also counts — but only for http(s) URLs.
        self.assertEqual(detect_datatype("https://www.example.com/docs/index.html"), DataType.DOCS_SITE)
        self.assertNotEqual(detect_datatype("file:///var/www/docs/index.html"), DataType.DOCS_SITE)  # NOT equal

    def test_detect_datatype_web_page(self):
        self.assertEqual(detect_datatype("https://nav.al/agi"), DataType.WEB_PAGE)

    def test_detect_datatype_invalid_url(self):
        # Anything that does not parse as a URL falls back to plain text.
        self.assertEqual(detect_datatype("not a url"), DataType.TEXT)

    def test_detect_datatype_qna_pair(self):
        # A 2-tuple of strings is a question/answer pair.
        self.assertEqual(
            detect_datatype(("Question?", "Answer. Content of the string is irrelevant.")), DataType.QNA_PAIR
        )

    def test_detect_datatype_qna_pair_types(self):
        """Test that a QnA pair needs to be a tuple of length two, and both items have to be strings."""
        with self.assertRaises(TypeError):
            self.assertNotEqual(
                detect_datatype(("How many planets are in our solar system?", 8)), DataType.QNA_PAIR
            )  # NOT equal

    def test_detect_datatype_text(self):
        self.assertEqual(detect_datatype("Just some text."), DataType.TEXT)

    def test_detect_datatype_non_string_error(self):
        """Test type error if the value passed is not a string, and not a valid non-string data_type"""
        with self.assertRaises(TypeError):
            detect_datatype(["foo", "bar"])

    @patch("os.path.isfile")
    def test_detect_datatype_regular_filesystem_file_txt(self, mock_isfile):
        with tempfile.NamedTemporaryFile(suffix=".txt", delete=True) as tmp:
            mock_isfile.return_value = True
            self.assertEqual(detect_datatype(tmp.name), DataType.TEXT_FILE)

    def test_detect_datatype_regular_filesystem_no_file(self):
        """Test that if a filepath is not actually an existing file, it is not handled as a file path."""
        self.assertEqual(detect_datatype("/var/not-an-existing-file.txt"), DataType.TEXT)

    def test_doc_examples_quickstart(self):
        """Test examples used in the documentation."""
        self.assertEqual(detect_datatype("https://en.wikipedia.org/wiki/Elon_Musk"), DataType.WEB_PAGE)
        self.assertEqual(detect_datatype("https://www.tesla.com/elon-musk"), DataType.WEB_PAGE)

    def test_doc_examples_introduction(self):
        """Test examples used in the documentation."""
        self.assertEqual(detect_datatype("https://www.youtube.com/watch?v=3qHkcs3kG44"), DataType.YOUTUBE_VIDEO)
        self.assertEqual(
            detect_datatype(
                "https://navalmanack.s3.amazonaws.com/Eric-Jorgenson_The-Almanack-of-Naval-Ravikant_Final.pdf"
            ),
            DataType.PDF_FILE,
        )
        self.assertEqual(detect_datatype("https://nav.al/feedback"), DataType.WEB_PAGE)

    def test_doc_examples_app_types(self):
        """Test examples used in the documentation."""
        self.assertEqual(detect_datatype("https://www.youtube.com/watch?v=Ff4fRgnuFgQ"), DataType.YOUTUBE_VIDEO)
        self.assertEqual(detect_datatype("https://en.wikipedia.org/wiki/Mark_Zuckerberg"), DataType.WEB_PAGE)

    def test_doc_examples_configuration(self):
        """Test examples used in the documentation."""
        import subprocess
        import sys

        # NOTE(review): installs a third-party package at test time and then
        # fetches a live Wikipedia page — network-dependent and slow;
        # consider pytest.importorskip / a recorded fixture instead.
        subprocess.check_call([sys.executable, "-m", "pip", "install", "wikipedia"])
        import wikipedia

        page = wikipedia.page("Albert Einstein")
        # TODO: Add a wikipedia type, so wikipedia is a dependency and we don't need this slow test.
        # (timings: import: 1.4s, fetch wiki: 0.7s)
        self.assertEqual(detect_datatype(page.content), DataType.TEXT)
# Allow running this module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedchain/test_embedchain.py | embedchain/tests/embedchain/test_embedchain.py | import os
import pytest
from chromadb.api.models.Collection import Collection
from embedchain import App
from embedchain.config import AppConfig, ChromaDbConfig
from embedchain.embedchain import EmbedChain
from embedchain.llm.base import BaseLlm
from embedchain.memory.base import ChatHistory
from embedchain.vectordb.chroma import ChromaDB
os.environ["OPENAI_API_KEY"] = "test-api-key"
@pytest.fixture
def app_instance():
    # Fresh App with metrics collection disabled and verbose logging.
    config = AppConfig(log_level="DEBUG", collect_metrics=False)
    return App(config=config)
def test_whole_app(app_instance, mocker):
    """End-to-end smoke test with every LLM/db touchpoint mocked out."""
    canned = "lorem ipsum dolor sit amet, consectetur adipiscing"
    mocker.patch.object(EmbedChain, "add")
    mocker.patch.object(EmbedChain, "_retrieve_from_database")
    mocker.patch.object(BaseLlm, "get_answer_from_llm", return_value=canned)
    mocker.patch.object(BaseLlm, "get_llm_model_answer", return_value=canned)
    mocker.patch.object(BaseLlm, "generate_prompt")
    mocker.patch.object(BaseLlm, "add_history")
    mocker.patch.object(ChatHistory, "delete", autospec=True)
    app_instance.add(canned, data_type="text")
    app_instance.query("What text did I give you?")
    app_instance.chat("What text did I give you?")
    # one prompt generated per query() / chat() call
    assert BaseLlm.generate_prompt.call_count == 2
    app_instance.reset()
def test_add_after_reset(app_instance, mocker):
    # NOTE(review): the injected app_instance fixture is immediately shadowed
    # by a locally built App whose Chroma config has allow_reset enabled.
    mocker.patch("embedchain.vectordb.chroma.chromadb.Client")
    config = AppConfig(log_level="DEBUG", collect_metrics=False)
    chroma_config = ChromaDbConfig(allow_reset=True)
    db = ChromaDB(config=chroma_config)
    app_instance = App(config=config, db=db)
    # mock delete chat history
    mocker.patch.object(ChatHistory, "delete", autospec=True)
    app_instance.reset()
    # the client must still respond after a reset
    app_instance.db.client.heartbeat()
    # adds after a reset must succeed (Collection.add itself is mocked)
    mocker.patch.object(Collection, "add")
    app_instance.db.collection.add(
        embeddings=[[1.1, 2.3, 3.2], [4.5, 6.9, 4.4], [1.1, 2.3, 3.2]],
        metadatas=[
            {"chapter": "3", "verse": "16"},
            {"chapter": "3", "verse": "5"},
            {"chapter": "29", "verse": "11"},
        ],
        ids=["id1", "id2", "id3"],
    )
    app_instance.reset()
def test_add_with_incorrect_content(app_instance, mocker):
    """A non-string JSON payload is rejected with a TypeError."""
    bad_content = [{"foo": "bar"}]
    with pytest.raises(TypeError):
        app_instance.add(bad_content, data_type="json")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/models/test_data_type.py | embedchain/tests/models/test_data_type.py | from embedchain.models.data_type import (
DataType,
DirectDataType,
IndirectDataType,
SpecialDataType,
)
def test_subclass_types_in_data_type():
    """Test that all data type category subclasses are contained in the composite data type"""
    # One loop over the three category enums replaces three copy-pasted loops.
    for subclass in (DirectDataType, IndirectDataType, SpecialDataType):
        for data_type in subclass:
            assert (
                data_type.value in DataType._value2member_map_
            ), f"{subclass.__name__}.{data_type.name} missing from DataType"
def test_data_type_in_subclasses():
    """Test that all data types in the composite data type are categorized in a subclass"""
    subclasses = (DirectDataType, IndirectDataType, SpecialDataType)
    for data_type in DataType:
        # The original if/elif chain re-asserted the very condition it had
        # just tested; a single membership check expresses the requirement.
        assert any(
            data_type.value in subclass._value2member_map_ for subclass in subclasses
        ), f"{data_type.value} not found in any subclass enums"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/memory/test_chat_memory.py | embedchain/tests/memory/test_chat_memory.py | import pytest
from embedchain.memory.base import ChatHistory
from embedchain.memory.message import ChatMessage
# Fixture for creating an instance of ChatHistory
@pytest.fixture
def chat_memory_instance():
    """Provide a fresh ChatHistory instance for each test so state never leaks between tests."""
    return ChatHistory()
def test_add_chat_memory(chat_memory_instance):
    """A single QA round stored via add() is reflected by count(); delete() removes it again."""
    app_id, session_id = "test_app", "test_session"

    qa_round = ChatMessage()
    qa_round.add_user_message("Hello, how are you?")
    qa_round.add_ai_message("I'm fine, thank you!")
    chat_memory_instance.add(app_id, session_id, qa_round)

    assert chat_memory_instance.count(app_id, session_id) == 1

    # Clean up so later tests see an empty history.
    chat_memory_instance.delete(app_id, session_id)
def test_get(chat_memory_instance):
    """get() honours num_rounds for recent history and fetch_all for the full history."""
    app_id = "test_app"
    session_id = "test_session"

    # Store six QA rounds.
    for idx in range(1, 7):
        qa_round = ChatMessage()
        qa_round.add_user_message(f"Question {idx}")
        qa_round.add_ai_message(f"Answer {idx}")
        chat_memory_instance.add(app_id, session_id, qa_round)

    assert len(chat_memory_instance.get(app_id, session_id, num_rounds=5)) == 5
    assert len(chat_memory_instance.get(app_id, fetch_all=True)) == 6
def test_delete_chat_history(chat_memory_instance):
    """delete() scoped to a session removes only that session; app-wide delete clears everything."""
    app_id = "test_app"

    def _populate(session):
        # Store five QA rounds under the given session id.
        for idx in range(1, 6):
            qa_round = ChatMessage()
            qa_round.add_user_message(f"Question {idx}")
            qa_round.add_ai_message(f"Answer {idx}")
            chat_memory_instance.add(app_id, session, qa_round)

    session_id = "test_session"
    session_id_2 = "test_session_2"
    _populate(session_id)
    _populate(session_id_2)

    # Session-scoped delete: only the first session's rounds go away.
    chat_memory_instance.delete(app_id, session_id)
    assert chat_memory_instance.count(app_id, session_id) == 0
    assert chat_memory_instance.count(app_id) == 5

    # App-scoped delete: everything goes away.
    chat_memory_instance.delete(app_id)
    assert chat_memory_instance.count(app_id) == 0
@pytest.fixture
def close_connection(chat_memory_instance):
    # Teardown-only fixture: closes the ChatHistory connection after the test runs.
    # NOTE(review): no test in this chunk requests this fixture — confirm it is still needed.
    yield
    chat_memory_instance.close_connection()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/memory/test_memory_messages.py | embedchain/tests/memory/test_memory_messages.py | from embedchain.memory.message import BaseMessage, ChatMessage
def test_ec_base_message():
    """BaseMessage stores content/creator/metadata verbatim and renders as 'creator: content'."""
    msg = BaseMessage(content="Hello, how are you?", created_by="human", metadata={"key": "value"})

    assert msg.content == "Hello, how are you?"
    assert msg.created_by == "human"
    assert msg.metadata == {"key": "value"}
    # A bare BaseMessage has no type and is LangChain-serializable.
    assert msg.type is None
    assert msg.is_lc_serializable() is True
    assert str(msg) == "human: Hello, how are you?"
def test_ec_base_chat_message():
    """ChatMessage pairs a human and an AI message, keeping per-message metadata."""
    chat = ChatMessage()
    chat.add_user_message("Hello, how are you?", metadata={"user": "John"})
    chat.add_ai_message("I'm fine, thank you!", metadata={"response_time": 0.5})

    human, ai = chat.human_message, chat.ai_message
    assert (human.content, human.created_by, human.metadata) == (
        "Hello, how are you?",
        "human",
        {"user": "John"},
    )
    assert (ai.content, ai.created_by, ai.metadata) == (
        "I'm fine, thank you!",
        "ai",
        {"response_time": 0.5},
    )
    assert str(chat) == "human: Hello, how are you?\nai: I'm fine, thank you!"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/helper_classes/test_json_serializable.py | embedchain/tests/helper_classes/test_json_serializable.py | import random
import unittest
from string import Template
from embedchain import App
from embedchain.config import AppConfig, BaseLlmConfig
from embedchain.helpers.json_serializable import (
JSONSerializable,
register_deserializable,
)
class TestJsonSerializable(unittest.TestCase):
    """Tests for JSONSerializable serialize/deserialize round-trips and class registration."""
    def test_base_function(self):
        """Test that the base premise of serialization and deserialization is working."""
        @register_deserializable
        class TestClass(JSONSerializable):
            def __init__(self):
                # Random marker so instances are distinguishable after a round-trip.
                self.rng = random.random()
        original_class = TestClass()
        serial = original_class.serialize()
        # Negative test to show that a new class does not have the same random number.
        negative_test_class = TestClass()
        self.assertNotEqual(original_class.rng, negative_test_class.rng)
        # Test to show that a deserialized class has the same random number.
        positive_test_class: TestClass = TestClass().deserialize(serial)
        self.assertEqual(original_class.rng, positive_test_class.rng)
        self.assertTrue(isinstance(positive_test_class, TestClass))
        # Test that it works as a static method too.
        positive_test_class: TestClass = TestClass.deserialize(serial)
        self.assertEqual(original_class.rng, positive_test_class.rng)
        # TODO: There's no reason it shouldn't work, but serialization to and from file should be tested too.
    def test_registration_required(self):
        """Test that registration is required, and that without registration the default class is returned."""
        class SecondTestClass(JSONSerializable):
            def __init__(self):
                self.default = True
        app = SecondTestClass()
        # Make not default
        app.default = False
        # Serialize
        serial = app.serialize()
        # Deserialize. Due to the way errors are handled, it will not fail but return a default class.
        app: SecondTestClass = SecondTestClass().deserialize(serial)
        self.assertTrue(app.default)
        # If we register and try again with the same serial, it should work
        SecondTestClass._register_class_as_deserializable(SecondTestClass)
        app: SecondTestClass = SecondTestClass().deserialize(serial)
        self.assertFalse(app.default)
    def test_recursive(self):
        """Test that nested serializable objects (app.config) round-trip with the real app."""
        random_id = str(random.random())
        config = AppConfig(id=random_id, collect_metrics=False)
        # config class is set under app.config.
        app = App(config=config)
        s = app.serialize()
        new_app: App = App.deserialize(s)
        # The id of the new app is the same as the first one.
        self.assertEqual(random_id, new_app.config.id)
        # We have proven that a nested class (app.config) can be serialized and deserialized just the same.
        # TODO: test deeper recursion
    def test_special_subclasses(self):
        """Test special subclasses (string.Template) that are not serializable by default."""
        # Template
        config = BaseLlmConfig(template=Template("My custom template with $query, $context and $history."))
        s = config.serialize()
        new_config: BaseLlmConfig = BaseLlmConfig.deserialize(s)
        self.assertEqual(config.prompt.template, new_config.prompt.template)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_openai.py | embedchain/tests/llm/test_openai.py | import os
import httpx
import pytest
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from embedchain.config import BaseLlmConfig
from embedchain.llm.openai import OpenAILlm
@pytest.fixture()
def env_config():
    """Set the OpenAI env vars the tests rely on and remove BOTH on teardown.

    The original teardown popped only OPENAI_API_KEY, leaking OPENAI_API_BASE
    into every test that runs afterwards in the same session.
    """
    os.environ["OPENAI_API_KEY"] = "test_api_key"
    os.environ["OPENAI_API_BASE"] = "https://api.openai.com/v1/engines/"
    yield
    os.environ.pop("OPENAI_API_KEY")
    # Use a default so teardown stays safe even if a test already removed the var.
    os.environ.pop("OPENAI_API_BASE", None)
@pytest.fixture
def config(env_config):
    """BaseLlmConfig with deterministic settings and no proxy clients (depends on env_config)."""
    yield BaseLlmConfig(
        temperature=0.7,
        max_tokens=50,
        top_p=0.8,
        stream=False,
        system_prompt="System prompt",
        model="gpt-4o-mini",
        http_client_proxies=None,
        http_async_client_proxies=None,
    )
def test_get_llm_model_answer(config, mocker):
    """get_llm_model_answer delegates to _get_answer with the query and config."""
    stub = mocker.patch("embedchain.llm.openai.OpenAILlm._get_answer", return_value="Test answer")

    assert OpenAILlm(config).get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with("Test query", config)
def test_get_llm_model_answer_with_system_prompt(config, mocker):
    """A custom system prompt does not change the delegation to _get_answer."""
    config.system_prompt = "Custom system prompt"
    stub = mocker.patch("embedchain.llm.openai.OpenAILlm._get_answer", return_value="Test answer")

    assert OpenAILlm(config).get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with("Test query", config)
def test_get_llm_model_answer_empty_prompt(config, mocker):
    """An empty query is passed through to _get_answer unchanged."""
    stub = mocker.patch("embedchain.llm.openai.OpenAILlm._get_answer", return_value="Test answer")

    assert OpenAILlm(config).get_llm_model_answer("") == "Test answer"
    stub.assert_called_once_with("", config)
def test_get_llm_model_answer_with_token_usage(config, mocker):
    """When token_usage is enabled, the answer is returned with a token/cost breakdown."""
    # Rebuild the config with token_usage=True since the shared fixture has it off.
    test_config = BaseLlmConfig(
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        top_p=config.top_p,
        stream=config.stream,
        system_prompt=config.system_prompt,
        model=config.model,
        token_usage=True,
    )
    # With token_usage, _get_answer returns (answer, usage-dict).
    mocked_get_answer = mocker.patch(
        "embedchain.llm.openai.OpenAILlm._get_answer",
        return_value=("Test answer", {"prompt_tokens": 1, "completion_tokens": 2}),
    )
    llm = OpenAILlm(test_config)
    answer, token_info = llm.get_llm_model_answer("Test query")
    assert answer == "Test answer"
    # NOTE(review): total_cost 1.35e-06 presumably reflects the gpt-4o-mini price
    # table for 1 prompt + 2 completion tokens — update if pricing data changes.
    assert token_info == {
        "prompt_tokens": 1,
        "completion_tokens": 2,
        "total_tokens": 3,
        "total_cost": 1.35e-06,
        "cost_currency": "USD",
    }
    mocked_get_answer.assert_called_once_with("Test query", test_config)
def test_get_llm_model_answer_with_streaming(config, mocker):
    """Streaming mode must attach a StreamingStdOutCallbackHandler to ChatOpenAI."""
    config.stream = True
    chat_cls = mocker.patch("embedchain.llm.openai.ChatOpenAI")

    OpenAILlm(config).get_llm_model_answer("Test query")

    chat_cls.assert_called_once()
    # Inspect the callbacks keyword of every ChatOpenAI construction.
    callback_lists = [call.kwargs["callbacks"] for call in chat_cls.call_args_list]
    assert any(isinstance(cbs[0], StreamingStdOutCallbackHandler) for cbs in callback_lists)
def test_get_llm_model_answer_without_system_prompt(config, mocker):
    """Without a system prompt, ChatOpenAI is constructed with exactly the config values."""
    config.system_prompt = None
    mocked_openai_chat = mocker.patch("embedchain.llm.openai.ChatOpenAI")
    llm = OpenAILlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_openai_chat.assert_called_once_with(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={},
        top_p=config.top_p,  # was "top_p= config.top_p" — normalized to match sibling tests
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        http_client=None,
        http_async_client=None,
    )
def test_get_llm_model_answer_with_special_headers(config, mocker):
    """Custom default_headers from the config are forwarded to ChatOpenAI."""
    config.default_headers = {"test": "test"}
    mocked_openai_chat = mocker.patch("embedchain.llm.openai.ChatOpenAI")
    llm = OpenAILlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_openai_chat.assert_called_once_with(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={},
        top_p=config.top_p,  # was "top_p= config.top_p" — normalized to match sibling tests
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        default_headers={"test": "test"},
        http_client=None,
        http_async_client=None,
    )
def test_get_llm_model_answer_with_model_kwargs(config, mocker):
    """Extra model_kwargs from the config are forwarded verbatim to ChatOpenAI."""
    config.model_kwargs = {"response_format": {"type": "json_object"}}
    chat_cls = mocker.patch("embedchain.llm.openai.ChatOpenAI")

    OpenAILlm(config).get_llm_model_answer("Test query")

    expected_kwargs = dict(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={"response_format": {"type": "json_object"}},
        top_p=config.top_p,
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        http_client=None,
        http_async_client=None,
    )
    chat_cls.assert_called_once_with(**expected_kwargs)
@pytest.mark.parametrize(
    "mock_return, expected",
    [
        # A tool call was produced -> serialized JSON of the first call's args.
        ([{"test": "test"}], '{"test": "test"}'),
        # No tool call was produced -> fallback message.
        ([], "Input could not be mapped to the function!"),
    ],
)
def test_get_llm_model_answer_with_tools(config, mocker, mock_return, expected):
    """With tools configured, ChatOpenAI is bound to the converted tool and the parsed output is returned."""
    mocked_openai_chat = mocker.patch("embedchain.llm.openai.ChatOpenAI")
    mocked_convert_to_openai_tool = mocker.patch("langchain_core.utils.function_calling.convert_to_openai_tool")
    mocked_json_output_tools_parser = mocker.patch("langchain.output_parsers.openai_tools.JsonOutputToolsParser")
    # Stub the whole chain chat.bind(...).pipe(parser).invoke(...) down to the parametrized value.
    mocked_openai_chat.return_value.bind.return_value.pipe.return_value.invoke.return_value = mock_return
    llm = OpenAILlm(config, tools={"test": "test"})
    answer = llm.get_llm_model_answer("Test query")
    mocked_openai_chat.assert_called_once_with(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={},
        top_p=config.top_p,
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        http_client=None,
        http_async_client=None,
    )
    # The raw tools dict must be converted to the OpenAI tool schema exactly once.
    mocked_convert_to_openai_tool.assert_called_once_with({"test": "test"})
    mocked_json_output_tools_parser.assert_called_once()
    assert answer == expected
def test_get_llm_model_answer_with_http_client_proxies(env_config, mocker):
    """A string proxy config yields an httpx.Client that is handed to ChatOpenAI."""
    mocked_openai_chat = mocker.patch("embedchain.llm.openai.ChatOpenAI")
    # Replace the httpx.Client class so we can observe its construction and instance.
    mock_http_client = mocker.Mock(spec=httpx.Client)
    mock_http_client_instance = mocker.Mock(spec=httpx.Client)
    mock_http_client.return_value = mock_http_client_instance
    mocker.patch("httpx.Client", new=mock_http_client)
    config = BaseLlmConfig(
        temperature=0.7,
        max_tokens=50,
        top_p=0.8,
        stream=False,
        system_prompt="System prompt",
        model="gpt-4o-mini",
        http_client_proxies="http://testproxy.mem0.net:8000",
    )
    llm = OpenAILlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_openai_chat.assert_called_once_with(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={},
        top_p=config.top_p,
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        http_client=mock_http_client_instance,
        http_async_client=None,
    )
    # The client must be constructed with the proxy string from the config.
    mock_http_client.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
def test_get_llm_model_answer_with_http_async_client_proxies(env_config, mocker):
    """A dict proxy config yields an httpx.AsyncClient that is handed to ChatOpenAI."""
    mocked_openai_chat = mocker.patch("embedchain.llm.openai.ChatOpenAI")
    # Replace the httpx.AsyncClient class so we can observe its construction and instance.
    mock_http_async_client = mocker.Mock(spec=httpx.AsyncClient)
    mock_http_async_client_instance = mocker.Mock(spec=httpx.AsyncClient)
    mock_http_async_client.return_value = mock_http_async_client_instance
    mocker.patch("httpx.AsyncClient", new=mock_http_async_client)
    config = BaseLlmConfig(
        temperature=0.7,
        max_tokens=50,
        top_p=0.8,
        stream=False,
        system_prompt="System prompt",
        model="gpt-4o-mini",
        http_async_client_proxies={"http://": "http://testproxy.mem0.net:8000"},
    )
    llm = OpenAILlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_openai_chat.assert_called_once_with(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={},
        top_p=config.top_p,
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        http_client=None,
        http_async_client=mock_http_async_client_instance,
    )
    # The async client must be constructed with the proxy mapping from the config.
    mock_http_async_client.assert_called_once_with(proxies={"http://": "http://testproxy.mem0.net:8000"})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_generate_prompt.py | embedchain/tests/llm/test_generate_prompt.py | import unittest
from string import Template
from embedchain import App
from embedchain.config import AppConfig, BaseLlmConfig
class TestGeneratePrompt(unittest.TestCase):
    """Tests for BaseLlm.generate_prompt template substitution (context, query, history)."""
    def setUp(self):
        # Telemetry disabled so tests stay offline.
        self.app = App(config=AppConfig(collect_metrics=False))
    def test_generate_prompt_with_template(self):
        """
        Tests that the generate_prompt method correctly formats the prompt using
        a custom template provided in the BaseLlmConfig instance.
        This test sets up a scenario with an input query and a list of contexts,
        and a custom template, and then calls generate_prompt. It checks that the
        returned prompt correctly incorporates all the contexts and the query into
        the format specified by the template.
        """
        # Setup
        input_query = "Test query"
        contexts = ["Context 1", "Context 2", "Context 3"]
        template = "You are a bot. Context: ${context} - Query: ${query} - Helpful answer:"
        config = BaseLlmConfig(template=Template(template))
        self.app.llm.config = config
        # Execute
        result = self.app.llm.generate_prompt(input_query, contexts)
        # Assert: contexts are joined with " | " before substitution.
        expected_result = (
            "You are a bot. Context: Context 1 | Context 2 | Context 3 - Query: Test query - Helpful answer:"
        )
        self.assertEqual(result, expected_result)
    def test_generate_prompt_with_contexts_list(self):
        """
        Tests that the generate_prompt method correctly handles a list of contexts.
        This test sets up a scenario with an input query and a list of contexts,
        and then calls generate_prompt. It checks that the returned prompt
        correctly includes all the contexts and the query.
        """
        # Setup
        input_query = "Test query"
        contexts = ["Context 1", "Context 2", "Context 3"]
        config = BaseLlmConfig()
        # Execute — uses the default prompt template from BaseLlmConfig.
        self.app.llm.config = config
        result = self.app.llm.generate_prompt(input_query, contexts)
        # Assert
        expected_result = config.prompt.substitute(context="Context 1 | Context 2 | Context 3", query=input_query)
        self.assertEqual(result, expected_result)
    def test_generate_prompt_with_history(self):
        """
        Test the 'generate_prompt' method with BaseLlmConfig containing a history attribute.
        History entries are joined with newlines before substitution.
        """
        config = BaseLlmConfig()
        config.prompt = Template("Context: $context | Query: $query | History: $history")
        self.app.llm.config = config
        self.app.llm.set_history(["Past context 1", "Past context 2"])
        prompt = self.app.llm.generate_prompt("Test query", ["Test context"])
        expected_prompt = "Context: Test context | Query: Test query | History: Past context 1\nPast context 2"
        self.assertEqual(prompt, expected_prompt)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_chat.py | embedchain/tests/llm/test_chat.py | import os
import unittest
from unittest.mock import MagicMock, patch
from embedchain import App
from embedchain.config import AppConfig, BaseLlmConfig
from embedchain.llm.base import BaseLlm
from embedchain.memory.base import ChatHistory
from embedchain.memory.message import ChatMessage
class TestApp(unittest.TestCase):
    """Tests for App.chat: history handling, template swapping, and where-filters."""
    def setUp(self):
        # Dummy key so App construction does not require real credentials.
        os.environ["OPENAI_API_KEY"] = "test_key"
        self.app = App(config=AppConfig(collect_metrics=False))
    @patch.object(App, "_retrieve_from_database", return_value=["Test context"])
    @patch.object(BaseLlm, "get_answer_from_llm", return_value="Test answer")
    def test_chat_with_memory(self, mock_get_answer, mock_retrieve):
        """
        This test checks the functionality of the 'chat' method in the App class with respect to the chat history
        memory.
        The 'chat' method is called twice. The first call initializes the chat history memory.
        The second call is expected to use the chat history from the first call.
        Key assumptions tested:
        - After the first call, 'memory.chat_memory.add_user_message' and 'memory.chat_memory.add_ai_message' are
          called with correct arguments, adding the correct chat history.
        - During the second call, the 'chat' method uses the chat history from the first call.
        The test isolates the 'chat' method behavior by mocking out '_retrieve_from_database', 'get_answer_from_llm' and
        'memory' methods.
        """
        config = AppConfig(collect_metrics=False)
        app = App(config=config)
        with patch.object(BaseLlm, "add_history") as mock_history:
            first_answer = app.chat("Test query 1")
            self.assertEqual(first_answer, "Test answer")
            # Without an explicit session, history lands in the "default" session.
            mock_history.assert_called_with(app.config.id, "Test query 1", "Test answer", session_id="default")
            second_answer = app.chat("Test query 2", session_id="test_session")
            self.assertEqual(second_answer, "Test answer")
            mock_history.assert_called_with(app.config.id, "Test query 2", "Test answer", session_id="test_session")
    @patch.object(App, "_retrieve_from_database", return_value=["Test context"])
    @patch.object(BaseLlm, "get_answer_from_llm", return_value="Test answer")
    def test_template_replacement(self, mock_get_answer, mock_retrieve):
        """
        Tests that if a default template is used and it doesn't contain history,
        the default template is swapped in.
        Also tests that a dry run does not change the history.
        """
        with patch.object(ChatHistory, "get") as mock_memory:
            mock_message = ChatMessage()
            mock_message.add_user_message("Test query 1")
            mock_message.add_ai_message("Test answer")
            mock_memory.return_value = [mock_message]
            config = AppConfig(collect_metrics=False)
            app = App(config=config)
            first_answer = app.chat("Test query 1")
            self.assertEqual(first_answer, "Test answer")
            self.assertEqual(len(app.llm.history), 1)
            history = app.llm.history
            # dry_run returns the rendered prompt instead of calling the LLM.
            dry_run = app.chat("Test query 2", dry_run=True)
            self.assertIn("Conversation history:", dry_run)
            # History must be untouched by the dry run.
            self.assertEqual(history, app.llm.history)
            self.assertEqual(len(app.llm.history), 1)
    @patch("chromadb.api.models.Collection.Collection.add", MagicMock)
    def test_chat_with_where_in_params(self):
        """
        Test that a where filter passed directly to chat() reaches the retrieval step.
        """
        with patch.object(self.app, "_retrieve_from_database") as mock_retrieve:
            mock_retrieve.return_value = ["Test context"]
            with patch.object(self.app.llm, "get_llm_model_answer") as mock_answer:
                mock_answer.return_value = "Test answer"
                answer = self.app.chat("Test query", where={"attribute": "value"})
                self.assertEqual(answer, "Test answer")
                _args, kwargs = mock_retrieve.call_args
                self.assertEqual(kwargs.get("input_query"), "Test query")
                self.assertEqual(kwargs.get("where"), {"attribute": "value"})
                mock_answer.assert_called_once()
    @patch("chromadb.api.models.Collection.Collection.add", MagicMock)
    def test_chat_with_where_in_chat_config(self):
        """
        This test checks the functionality of the 'chat' method in the App class.
        It simulates a scenario where the '_retrieve_from_database' method returns a context list based on
        a where filter and 'get_llm_model_answer' returns an expected answer string.
        The 'chat' method is expected to call '_retrieve_from_database' with the where filter specified
        in the BaseLlmConfig and 'get_llm_model_answer' methods appropriately and return the right answer.
        Key assumptions tested:
        - '_retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of
          BaseLlmConfig.
        - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test.
        - 'chat' method returns the value it received from 'get_llm_model_answer'.
        The test isolates the 'chat' method behavior by mocking out '_retrieve_from_database' and
        'get_llm_model_answer' methods.
        """
        with patch.object(self.app.llm, "get_llm_model_answer") as mock_answer:
            mock_answer.return_value = "Test answer"
            with patch.object(self.app.db, "query") as mock_database_query:
                mock_database_query.return_value = ["Test context"]
                llm_config = BaseLlmConfig(where={"attribute": "value"})
                answer = self.app.chat("Test query", llm_config)
                self.assertEqual(answer, "Test answer")
                _args, kwargs = mock_database_query.call_args
                self.assertEqual(kwargs.get("input_query"), "Test query")
                # The config's filter is merged with the app_id scoping filter.
                where = kwargs.get("where")
                assert "app_id" in where
                assert "attribute" in where
                mock_answer.assert_called_once()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_llama2.py | embedchain/tests/llm/test_llama2.py | import os
import pytest
from embedchain.llm.llama2 import Llama2Llm
@pytest.fixture
def llama2_llm():
    """Llama2Llm instance with a fake Replicate token in the environment."""
    # NOTE(review): the env var is never removed, so it leaks into later tests in the
    # same session — consider monkeypatch.setenv or a yield-fixture with cleanup.
    os.environ["REPLICATE_API_TOKEN"] = "test_api_token"
    llm = Llama2Llm()
    return llm
def test_init_raises_value_error_without_api_key(mocker):
    """Constructing Llama2Llm with an empty environment (no Replicate token) must fail."""
    mocker.patch.dict(os.environ, clear=True)
    with pytest.raises(ValueError):
        Llama2Llm()
def test_get_llm_model_answer_raises_value_error_for_system_prompt(llama2_llm):
    """Llama2Llm rejects configs that carry a system prompt."""
    llama2_llm.config.system_prompt = "system_prompt"
    with pytest.raises(ValueError):
        llama2_llm.get_llm_model_answer("prompt")
def test_get_llm_model_answer(llama2_llm, mocker):
    """The answer produced by the Replicate client is returned unchanged."""
    replicate_cls = mocker.patch("embedchain.llm.llama2.Replicate")
    replicate_cls.return_value = mocker.MagicMock()
    replicate_cls.return_value.invoke.return_value = "Test answer"

    # Exercise the full set of sampling parameters.
    llama2_llm.config.model = "test_model"
    llama2_llm.config.max_tokens = 50
    llama2_llm.config.temperature = 0.7
    llama2_llm.config.top_p = 0.8

    assert llama2_llm.get_llm_model_answer("Test query") == "Test answer"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_query.py | embedchain/tests/llm/test_query.py | import os
from unittest.mock import MagicMock, patch
import pytest
from embedchain import App
from embedchain.config import AppConfig, BaseLlmConfig
from embedchain.llm.openai import OpenAILlm
@pytest.fixture
def app():
    """App with telemetry disabled and a dummy OpenAI key in the environment."""
    os.environ["OPENAI_API_KEY"] = "test_api_key"
    return App(config=AppConfig(collect_metrics=False))
@patch("chromadb.api.models.Collection.Collection.add", MagicMock)
def test_query(app):
with patch.object(app, "_retrieve_from_database") as mock_retrieve:
mock_retrieve.return_value = ["Test context"]
with patch.object(app.llm, "get_llm_model_answer") as mock_answer:
mock_answer.return_value = "Test answer"
answer = app.query(input_query="Test query")
assert answer == "Test answer"
mock_retrieve.assert_called_once()
_, kwargs = mock_retrieve.call_args
input_query_arg = kwargs.get("input_query")
assert input_query_arg == "Test query"
mock_answer.assert_called_once()
@patch("embedchain.llm.openai.OpenAILlm._get_answer")
def test_query_config_app_passing(mock_get_answer):
mock_get_answer.return_value = MagicMock()
mock_get_answer.return_value = "Test answer"
config = AppConfig(collect_metrics=False)
chat_config = BaseLlmConfig(system_prompt="Test system prompt")
llm = OpenAILlm(config=chat_config)
app = App(config=config, llm=llm)
answer = app.llm.get_llm_model_answer("Test query")
assert app.llm.config.system_prompt == "Test system prompt"
assert answer == "Test answer"
@patch("chromadb.api.models.Collection.Collection.add", MagicMock)
def test_query_with_where_in_params(app):
with patch.object(app, "_retrieve_from_database") as mock_retrieve:
mock_retrieve.return_value = ["Test context"]
with patch.object(app.llm, "get_llm_model_answer") as mock_answer:
mock_answer.return_value = "Test answer"
answer = app.query("Test query", where={"attribute": "value"})
assert answer == "Test answer"
_, kwargs = mock_retrieve.call_args
assert kwargs.get("input_query") == "Test query"
assert kwargs.get("where") == {"attribute": "value"}
mock_answer.assert_called_once()
@patch("chromadb.api.models.Collection.Collection.add", MagicMock)
def test_query_with_where_in_query_config(app):
with patch.object(app.llm, "get_llm_model_answer") as mock_answer:
mock_answer.return_value = "Test answer"
with patch.object(app.db, "query") as mock_database_query:
mock_database_query.return_value = ["Test context"]
llm_config = BaseLlmConfig(where={"attribute": "value"})
answer = app.query("Test query", llm_config)
assert answer == "Test answer"
_, kwargs = mock_database_query.call_args
assert kwargs.get("input_query") == "Test query"
where = kwargs.get("where")
assert "app_id" in where
assert "attribute" in where
mock_answer.assert_called_once()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_together.py | embedchain/tests/llm/test_together.py | import os
import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.together import TogetherLlm
@pytest.fixture
def together_llm_config():
    """Config for the smallest Together model; TOGETHER_API_KEY lives only for the test's duration."""
    os.environ["TOGETHER_API_KEY"] = "test_api_key"
    yield BaseLlmConfig(model="together-ai-up-to-3b", max_tokens=50, temperature=0.7, top_p=0.8)
    os.environ.pop("TOGETHER_API_KEY")
def test_init_raises_value_error_without_api_key(mocker):
    """TogetherLlm cannot be built when TOGETHER_API_KEY is absent from the environment."""
    mocker.patch.dict(os.environ, clear=True)
    with pytest.raises(ValueError):
        TogetherLlm()
def test_get_llm_model_answer_raises_value_error_for_system_prompt(together_llm_config):
    """TogetherLlm rejects configs that carry a system prompt."""
    llm = TogetherLlm(together_llm_config)
    llm.config.system_prompt = "system_prompt"
    with pytest.raises(ValueError):
        llm.get_llm_model_answer("prompt")
def test_get_llm_model_answer(together_llm_config, mocker):
    """The stubbed _get_answer result is returned unchanged."""
    mocker.patch("embedchain.llm.together.TogetherLlm._get_answer", return_value="Test answer")
    assert TogetherLlm(together_llm_config).get_llm_model_answer("Test query") == "Test answer"
def test_get_llm_model_answer_with_token_usage(together_llm_config, mocker):
    """When token_usage is enabled, the answer comes with a token/cost breakdown."""
    # Rebuild the config with token_usage=True since the shared fixture has it off.
    test_config = BaseLlmConfig(
        temperature=together_llm_config.temperature,
        max_tokens=together_llm_config.max_tokens,
        top_p=together_llm_config.top_p,
        model=together_llm_config.model,
        token_usage=True,
    )
    # With token_usage, _get_answer returns (answer, usage-dict).
    mocker.patch(
        "embedchain.llm.together.TogetherLlm._get_answer",
        return_value=("Test answer", {"prompt_tokens": 1, "completion_tokens": 2}),
    )
    llm = TogetherLlm(test_config)
    answer, token_info = llm.get_llm_model_answer("Test query")
    assert answer == "Test answer"
    # NOTE(review): total_cost 3e-07 presumably reflects the together-ai-up-to-3b
    # price table for 3 tokens — update if pricing data changes.
    assert token_info == {
        "prompt_tokens": 1,
        "completion_tokens": 2,
        "total_tokens": 3,
        "total_cost": 3e-07,
        "cost_currency": "USD",
    }
def test_get_answer_mocked_together(together_llm_config, mocker):
    """The content of ChatTogether's invoke() response is surfaced as the answer."""
    chat_cls = mocker.patch("embedchain.llm.together.ChatTogether")
    chat_cls.return_value.invoke.return_value.content = "Mocked answer"

    llm = TogetherLlm(together_llm_config)
    assert llm.get_llm_model_answer("Test query") == "Mocked answer"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_google.py | embedchain/tests/llm/test_google.py | import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.google import GoogleLlm
@pytest.fixture
def google_llm_config():
    """Non-streaming Gemini config with fixed sampling parameters."""
    return BaseLlmConfig(
        model="gemini-pro",
        max_tokens=100,
        temperature=0.7,
        top_p=0.5,
        stream=False,
    )
def test_google_llm_init_missing_api_key(monkeypatch):
    """A missing GOOGLE_API_KEY must abort construction with a descriptive error."""
    monkeypatch.delenv("GOOGLE_API_KEY", raising=False)
    with pytest.raises(ValueError, match="Please set the GOOGLE_API_KEY environment variable."):
        GoogleLlm()
def test_google_llm_init(monkeypatch):
    """Construction succeeds when GOOGLE_API_KEY is set."""
    monkeypatch.setenv("GOOGLE_API_KEY", "fake_api_key")
    with monkeypatch.context() as m:
        # NOTE(review): importlib.import_module is stubbed to a no-op, presumably so the
        # Google generative-AI SDK need not be installed — confirm against GoogleLlm.__init__.
        m.setattr("importlib.import_module", lambda x: None)
        google_llm = GoogleLlm()
        assert google_llm is not None
def test_google_llm_get_llm_model_answer_with_system_prompt(monkeypatch):
    """GoogleLlm refuses to answer when a system prompt is configured."""
    monkeypatch.setenv("GOOGLE_API_KEY", "fake_api_key")
    # Stub the SDK import so construction works without the package installed.
    monkeypatch.setattr("importlib.import_module", lambda x: None)

    llm = GoogleLlm(config=BaseLlmConfig(system_prompt="system prompt"))
    with pytest.raises(ValueError, match="GoogleLlm does not support `system_prompt`"):
        llm.get_llm_model_answer("test prompt")
def test_google_llm_get_llm_model_answer(monkeypatch, google_llm_config):
    """get_llm_model_answer returns whatever _get_answer produces."""
    monkeypatch.setenv("GOOGLE_API_KEY", "fake_api_key")
    # Same signature as the original stub: (prompt, config) -> answer.
    monkeypatch.setattr(GoogleLlm, "_get_answer", lambda prompt, config: "Generated Text")

    llm = GoogleLlm(config=google_llm_config)
    assert llm.get_llm_model_answer("test prompt") == "Generated Text"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_aws_bedrock.py | embedchain/tests/llm/test_aws_bedrock.py | import pytest
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from embedchain.config import BaseLlmConfig
from embedchain.llm.aws_bedrock import AWSBedrockLlm
@pytest.fixture
def config(monkeypatch):
    """Bedrock Titan config; fake AWS credentials live only for the test's duration."""
    monkeypatch.setenv("AWS_ACCESS_KEY_ID", "test_access_key_id")
    monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "test_secret_access_key")
    yield BaseLlmConfig(
        model="amazon.titan-text-express-v1",
        model_kwargs={
            "temperature": 0.5,
            "topP": 1,
            "maxTokenCount": 1000,
        },
    )
    monkeypatch.delenv("AWS_ACCESS_KEY_ID")
    monkeypatch.delenv("AWS_SECRET_ACCESS_KEY")
def test_get_llm_model_answer(config, mocker):
    """get_llm_model_answer delegates to _get_answer positionally."""
    stub = mocker.patch("embedchain.llm.aws_bedrock.AWSBedrockLlm._get_answer", return_value="Test answer")
    assert AWSBedrockLlm(config).get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with("Test query", config)
def test_get_llm_model_answer_empty_prompt(config, mocker):
    """An empty prompt is passed through to _get_answer unchanged."""
    stub = mocker.patch("embedchain.llm.aws_bedrock.AWSBedrockLlm._get_answer", return_value="Test answer")
    assert AWSBedrockLlm(config).get_llm_model_answer("") == "Test answer"
    stub.assert_called_once_with("", config)
def test_get_llm_model_answer_with_streaming(config, mocker):
    """Streaming mode must hand a StreamingStdOutCallbackHandler to BedrockLLM."""
    config.stream = True
    mocked_bedrock_chat = mocker.patch("embedchain.llm.aws_bedrock.BedrockLLM")
    llm = AWSBedrockLlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_bedrock_chat.assert_called_once()
    # Pull the `callbacks` kwarg from every constructor call; at least one call
    # must lead with the stdout streaming handler.
    callbacks = [callback[1]["callbacks"] for callback in mocked_bedrock_chat.call_args_list]
    assert any(isinstance(callback[0], StreamingStdOutCallbackHandler) for callback in callbacks)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_vertex_ai.py | embedchain/tests/llm/test_vertex_ai.py | from unittest.mock import MagicMock, patch
import pytest
from langchain.schema import HumanMessage, SystemMessage
from embedchain.config import BaseLlmConfig
from embedchain.core.db.database import database_manager
from embedchain.llm.vertex_ai import VertexAILlm
@pytest.fixture(autouse=True)
def setup_database():
    """Initialize the embedchain database engine before each test in this module."""
    database_manager.setup_engine()
@pytest.fixture
def vertexai_llm():
    """A VertexAILlm wired to a chat-bison config."""
    return VertexAILlm(BaseLlmConfig(temperature=0.6, model="chat-bison"))
def test_get_llm_model_answer(vertexai_llm):
    """The public API forwards the prompt/config pair to _get_answer verbatim."""
    prompt = "Test Prompt"
    with patch.object(VertexAILlm, "_get_answer", return_value="Test Response") as stub:
        assert vertexai_llm.get_llm_model_answer(prompt) == "Test Response"
        stub.assert_called_once_with(prompt, vertexai_llm.config)
def test_get_llm_model_answer_with_token_usage(vertexai_llm):
    """With token_usage on, the answer comes back with normalized cost info."""
    # Clone the fixture's config but flip token_usage on.
    test_config = BaseLlmConfig(
        temperature=vertexai_llm.config.temperature,
        max_tokens=vertexai_llm.config.max_tokens,
        top_p=vertexai_llm.config.top_p,
        model=vertexai_llm.config.model,
        token_usage=True,
    )
    vertexai_llm.config = test_config
    # _get_answer now returns (answer, Vertex-style token counts).
    with patch.object(
        VertexAILlm,
        "_get_answer",
        return_value=("Test Response", {"prompt_token_count": 1, "candidates_token_count": 2}),
    ):
        response, token_info = vertexai_llm.get_llm_model_answer("Test Query")
        assert response == "Test Response"
        # Vertex counts are mapped to prompt/completion naming plus a USD cost.
        assert token_info == {
            "prompt_tokens": 1,
            "completion_tokens": 2,
            "total_tokens": 3,
            "total_cost": 3.75e-07,
            "cost_currency": "USD",
        }
@patch("embedchain.llm.vertex_ai.ChatVertexAI")
def test_get_answer(mock_chat_vertexai, vertexai_llm, caplog):
    """_get_answer invokes the Vertex chat model with the built message list."""
    mock_chat_vertexai.return_value.invoke.return_value = MagicMock(content="Test Response")
    config = vertexai_llm.config
    prompt = "Test Prompt"
    # Build the same message list the implementation is expected to pass along.
    messages = vertexai_llm._get_messages(prompt)
    response = vertexai_llm._get_answer(prompt, config)
    mock_chat_vertexai.return_value.invoke.assert_called_once_with(messages)
    assert response == "Test Response"
    # Fixture config has no top_p, so no unsupported-option warning is logged.
    assert "Config option `top_p` is not supported by this model." not in caplog.text
def test_get_messages(vertexai_llm):
    """_get_messages builds a [system, human] message pair in that order."""
    built = vertexai_llm._get_messages("Test Prompt", "Test System Prompt")
    expected = [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]
    assert built == expected
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_clarifai.py | embedchain/tests/llm/test_clarifai.py |
import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.clarifai import ClarifaiLlm
@pytest.fixture
def clarifai_llm_config(monkeypatch):
    """Clarifai GPT-4 config with a fake PAT exported for the test's duration."""
    monkeypatch.setenv("CLARIFAI_PAT", "test_api_key")
    yield BaseLlmConfig(
        model="https://clarifai.com/openai/chat-completion/models/GPT-4",
        model_kwargs={"temperature": 0.7, "max_tokens": 100},
    )
    monkeypatch.delenv("CLARIFAI_PAT")
def test_clarifai__llm_get_llm_model_answer(clarifai_llm_config, mocker):
    """get_llm_model_answer returns whatever the patched _get_answer yields."""
    mocker.patch("embedchain.llm.clarifai.ClarifaiLlm._get_answer", return_value="Test answer")
    assert ClarifaiLlm(clarifai_llm_config).get_llm_model_answer("Test query") == "Test answer"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_gpt4all.py | embedchain/tests/llm/test_gpt4all.py | import pytest
from langchain_community.llms.gpt4all import GPT4All as LangchainGPT4All
from embedchain.config import BaseLlmConfig
from embedchain.llm.gpt4all import GPT4ALLLlm
@pytest.fixture
def config():
    """Full GPT4All config covering sampling, streaming and system prompt."""
    yield BaseLlmConfig(
        temperature=0.7,
        max_tokens=50,
        top_p=0.8,
        stream=False,
        system_prompt="System prompt",
        model="orca-mini-3b-gguf2-q4_0.gguf",
    )
@pytest.fixture
def gpt4all_with_config(config):
    """GPT4ALLLlm built from the full config fixture."""
    return GPT4ALLLlm(config=config)
@pytest.fixture
def gpt4all_without_config():
    """GPT4ALLLlm built with all defaults."""
    return GPT4ALLLlm()
def test_gpt4all_init_with_config(config, gpt4all_with_config):
    """Every config field must be carried over onto the constructed LLM."""
    for field in ("temperature", "max_tokens", "top_p", "stream", "system_prompt", "model"):
        assert getattr(gpt4all_with_config.config, field) == getattr(config, field)
    assert isinstance(gpt4all_with_config.instance, LangchainGPT4All)
def test_gpt4all_init_without_config(gpt4all_without_config):
    """Defaults: the orca-mini model and a langchain GPT4All backing instance."""
    assert gpt4all_without_config.config.model == "orca-mini-3b-gguf2-q4_0.gguf"
    assert isinstance(gpt4all_without_config.instance, LangchainGPT4All)
def test_get_llm_model_answer(mocker, gpt4all_with_config):
    """The public API forwards prompt and config as keywords to _get_answer."""
    stub = mocker.patch("embedchain.llm.gpt4all.GPT4ALLLlm._get_answer", return_value="Test answer")
    assert gpt4all_with_config.get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with(prompt="Test query", config=gpt4all_with_config.config)
def test_gpt4all_model_switching(gpt4all_with_config):
    """Passing a per-call config with a different model must raise RuntimeError."""
    with pytest.raises(RuntimeError, match="GPT4ALLLlm does not support switching models at runtime."):
        gpt4all_with_config._get_answer("Test prompt", BaseLlmConfig(model="new_model"))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_azure_openai.py | embedchain/tests/llm/test_azure_openai.py | from unittest.mock import MagicMock, Mock, patch
import httpx
import pytest
from langchain.schema import HumanMessage, SystemMessage
from embedchain.config import BaseLlmConfig
from embedchain.llm.azure_openai import AzureOpenAILlm
@pytest.fixture
def azure_openai_llm():
    """AzureOpenAILlm configured with a deployment name and a system prompt."""
    config = BaseLlmConfig(
        deployment_name="azure_deployment",
        temperature=0.7,
        model="gpt-4o-mini",
        max_tokens=50,
        system_prompt="System Prompt",
    )
    return AzureOpenAILlm(config)
def test_get_llm_model_answer(azure_openai_llm):
    """The public answer API delegates to _get_answer with keyword arguments."""
    prompt = "Test Prompt"
    with patch.object(AzureOpenAILlm, "_get_answer", return_value="Test Response") as stub:
        assert azure_openai_llm.get_llm_model_answer(prompt) == "Test Response"
        stub.assert_called_once_with(prompt=prompt, config=azure_openai_llm.config)
def test_get_answer(azure_openai_llm):
    """_get_answer constructs AzureChatOpenAI with the expected arguments."""
    with patch("langchain_openai.AzureChatOpenAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.invoke.return_value = MagicMock(content="Test Response")
        prompt = "Test Prompt"
        response = azure_openai_llm._get_answer(prompt, azure_openai_llm.config)
        assert response == "Test Response"
        # No api_version on the fixture's config, so the default is expected.
        mock_chat.assert_called_once_with(
            deployment_name=azure_openai_llm.config.deployment_name,
            openai_api_version="2024-02-01",
            model_name=azure_openai_llm.config.model or "gpt-4o-mini",
            temperature=azure_openai_llm.config.temperature,
            max_tokens=azure_openai_llm.config.max_tokens,
            streaming=azure_openai_llm.config.stream,
            http_client=None,
            http_async_client=None,
        )
def test_get_messages(azure_openai_llm):
    """_get_messages builds a [system, human] message pair in that order."""
    built = azure_openai_llm._get_messages("Test Prompt", "Test System Prompt")
    expected = [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]
    assert built == expected
def test_when_no_deployment_name_provided():
    """A missing deployment_name must surface as a ValueError when answering."""
    config = BaseLlmConfig(temperature=0.7, model="gpt-4o-mini", max_tokens=50, system_prompt="System Prompt")
    with pytest.raises(ValueError):
        AzureOpenAILlm(config).get_llm_model_answer("Test Prompt")
def test_with_api_version():
    """An explicit api_version on the config is forwarded to AzureChatOpenAI."""
    config = BaseLlmConfig(
        deployment_name="azure_deployment",
        temperature=0.7,
        model="gpt-4o-mini",
        max_tokens=50,
        system_prompt="System Prompt",
        api_version="2024-02-01",
    )
    with patch("langchain_openai.AzureChatOpenAI") as mock_chat:
        llm = AzureOpenAILlm(config)
        llm.get_llm_model_answer("Test Prompt")
        mock_chat.assert_called_once_with(
            deployment_name="azure_deployment",
            openai_api_version="2024-02-01",
            model_name="gpt-4o-mini",
            temperature=0.7,
            max_tokens=50,
            streaming=False,
            http_client=None,
            http_async_client=None,
        )
def test_get_llm_model_answer_with_http_client_proxies():
    """Proxy settings in config must be forwarded via a freshly built httpx.Client."""
    # Factory/instance split: patching httpx.Client with the factory lets us
    # assert both the construction arguments and the instance handed to Azure.
    client_factory = Mock(spec=httpx.Client)
    client_instance = Mock(spec=httpx.Client)
    client_factory.return_value = client_instance

    with patch("langchain_openai.AzureChatOpenAI") as mock_chat, patch("httpx.Client", new=client_factory):
        mock_chat.return_value.invoke.return_value.content = "Mocked response"

        config = BaseLlmConfig(
            deployment_name="azure_deployment",
            temperature=0.7,
            max_tokens=50,
            stream=False,
            system_prompt="System prompt",
            model="gpt-4o-mini",
            http_client_proxies="http://testproxy.mem0.net:8000",
        )
        AzureOpenAILlm(config).get_llm_model_answer("Test query")

        mock_chat.assert_called_once_with(
            deployment_name="azure_deployment",
            openai_api_version="2024-02-01",
            model_name="gpt-4o-mini",
            temperature=0.7,
            max_tokens=50,
            streaming=False,
            http_client=client_instance,
            http_async_client=None,
        )
        client_factory.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
def test_get_llm_model_answer_with_http_async_client_proxies():
    """Async proxy settings must be forwarded via a freshly built httpx.AsyncClient."""
    async_factory = Mock(spec=httpx.AsyncClient)
    async_instance = Mock(spec=httpx.AsyncClient)
    async_factory.return_value = async_instance

    with patch("langchain_openai.AzureChatOpenAI") as mock_chat, patch("httpx.AsyncClient", new=async_factory):
        mock_chat.return_value.invoke.return_value.content = "Mocked response"

        config = BaseLlmConfig(
            deployment_name="azure_deployment",
            temperature=0.7,
            max_tokens=50,
            stream=False,
            system_prompt="System prompt",
            model="gpt-4o-mini",
            http_async_client_proxies={"http://": "http://testproxy.mem0.net:8000"},
        )
        AzureOpenAILlm(config).get_llm_model_answer("Test query")

        mock_chat.assert_called_once_with(
            deployment_name="azure_deployment",
            openai_api_version="2024-02-01",
            model_name="gpt-4o-mini",
            temperature=0.7,
            max_tokens=50,
            streaming=False,
            http_client=None,
            http_async_client=async_instance,
        )
        async_factory.assert_called_once_with(proxies={"http://": "http://testproxy.mem0.net:8000"})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/conftest.py | embedchain/tests/llm/conftest.py |
from unittest import mock
import pytest
@pytest.fixture(autouse=True)
def mock_alembic_command_upgrade():
    """Stub alembic's upgrade command so tests never run real DB migrations."""
    with mock.patch("alembic.command.upgrade"):
        yield
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_base_llm.py | embedchain/tests/llm/test_base_llm.py | from string import Template
import pytest
from embedchain.llm.base import BaseLlm, BaseLlmConfig
@pytest.fixture
def base_llm():
    """A BaseLlm built from a default configuration."""
    config = BaseLlmConfig()
    return BaseLlm(config=config)
def test_is_get_llm_model_answer_not_implemented(base_llm):
    """The abstract base must refuse to answer directly."""
    with pytest.raises(NotImplementedError):
        base_llm.get_llm_model_answer()
def test_is_stream_bool():
    """A non-boolean `stream` value must be rejected with a ValueError."""
    with pytest.raises(ValueError):
        BaseLlm(config=BaseLlmConfig(stream="test value"))
def test_template_string_gets_converted_to_Template_instance():
    """A plain-string `template` should be upgraded to a string.Template."""
    llm = BaseLlm(config=BaseLlmConfig(template="test value $query $context"))
    assert isinstance(llm.config.prompt, Template)
def test_is_get_llm_model_answer_implemented():
    """A subclass providing get_llm_model_answer is usable as-is."""

    class _ImplementedLlm(BaseLlm):
        def get_llm_model_answer(self):
            return "Implemented"

    llm = _ImplementedLlm(config=BaseLlmConfig())
    assert llm.get_llm_model_answer() == "Implemented"
def test_stream_response(base_llm):
    """_stream_response yields the answer chunks unchanged and in order."""
    chunks = ["Chunk1", "Chunk2", "Chunk3"]
    assert list(base_llm._stream_response(chunks)) == chunks
def test_append_search_and_context(base_llm):
    """Web search results are appended to the context on a new labelled line."""
    combined = base_llm._append_search_and_context("Context", "Web Search Result")
    assert combined == "Context\nWeb Search Result: Web Search Result"
def test_access_search_and_get_results(base_llm, mocker):
    """Smoke-check that access_search_and_get_results can be stubbed and called.

    NOTE(review): this patches the method and then calls the patch, so it
    exercises the mock rather than the real search implementation — consider
    adding a test against the real search path.
    """
    base_llm.access_search_and_get_results = mocker.patch.object(
        base_llm, "access_search_and_get_results", return_value="Search Results"
    )
    input_query = "Test query"
    result = base_llm.access_search_and_get_results(input_query)
    assert result == "Search Results"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_huggingface.py | embedchain/tests/llm/test_huggingface.py | import importlib
import os
import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.huggingface import HuggingFaceLlm
@pytest.fixture
def huggingface_llm_config():
    """Hub-model config; a fake access token is exported for the test's duration."""
    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "test_access_token"
    yield BaseLlmConfig(model="google/flan-t5-xxl", max_tokens=50, temperature=0.7, top_p=0.8)
    os.environ.pop("HUGGINGFACE_ACCESS_TOKEN")
@pytest.fixture
def huggingface_endpoint_config():
    """Inference-endpoint config; a fake access token is exported for the test."""
    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "test_access_token"
    yield BaseLlmConfig(endpoint="https://api-inference.huggingface.co/models/gpt2", model_kwargs={"device": "cpu"})
    os.environ.pop("HUGGINGFACE_ACCESS_TOKEN")
def test_init_raises_value_error_without_api_key(mocker):
    """With a scrubbed environment, construction must fail fast."""
    mocker.patch.dict(os.environ, clear=True)
    with pytest.raises(ValueError):
        HuggingFaceLlm()
def test_get_llm_model_answer_raises_value_error_for_system_prompt(huggingface_llm_config):
    """HuggingFaceLlm does not support system prompts and must say so."""
    llm = HuggingFaceLlm(huggingface_llm_config)
    llm.config.system_prompt = "system_prompt"
    with pytest.raises(ValueError):
        llm.get_llm_model_answer("prompt")
def test_top_p_value_within_range():
    """A top_p of exactly 1.0 lies outside the accepted open interval."""
    config = BaseLlmConfig(top_p=1.0)
    with pytest.raises(ValueError):
        HuggingFaceLlm._get_answer("test_prompt", config)
def test_dependency_is_imported():
    """The huggingface_hub package must be importable in this environment."""
    try:
        importlib.import_module("huggingface_hub")
        installed = True
    except ImportError:
        installed = False
    assert installed
def test_get_llm_model_answer(huggingface_llm_config, mocker):
    """The public API returns the patched _get_answer output unchanged."""
    mocker.patch("embedchain.llm.huggingface.HuggingFaceLlm._get_answer", return_value="Test answer")
    assert HuggingFaceLlm(huggingface_llm_config).get_llm_model_answer("Test query") == "Test answer"
def test_hugging_face_mock(huggingface_llm_config, mocker):
    """End-to-end through get_llm_model_answer with HuggingFaceHub stubbed."""
    mock_llm_instance = mocker.Mock(return_value="Test answer")
    mock_hf_hub = mocker.patch("embedchain.llm.huggingface.HuggingFaceHub")
    mock_hf_hub.return_value.invoke = mock_llm_instance
    llm = HuggingFaceLlm(huggingface_llm_config)
    answer = llm.get_llm_model_answer("Test query")
    assert answer == "Test answer"
    # The prompt must be passed verbatim to the hub model's invoke.
    mock_llm_instance.assert_called_once_with("Test query")
def test_custom_endpoint(huggingface_endpoint_config, mocker):
    """With an endpoint configured, HuggingFaceEndpoint is used for inference."""
    mock_llm_instance = mocker.Mock(return_value="Test answer")
    mock_hf_endpoint = mocker.patch("embedchain.llm.huggingface.HuggingFaceEndpoint")
    mock_hf_endpoint.return_value.invoke = mock_llm_instance
    llm = HuggingFaceLlm(huggingface_endpoint_config)
    answer = llm.get_llm_model_answer("Test query")
    assert answer == "Test answer"
    # The prompt must be passed verbatim to the endpoint's invoke.
    mock_llm_instance.assert_called_once_with("Test query")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_mistralai.py | embedchain/tests/llm/test_mistralai.py | import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.mistralai import MistralAILlm
@pytest.fixture
def mistralai_llm_config(monkeypatch):
    """mistral-tiny config; MISTRAL_API_KEY is faked while the test runs."""
    monkeypatch.setenv("MISTRAL_API_KEY", "fake_api_key")
    yield BaseLlmConfig(model="mistral-tiny", max_tokens=100, temperature=0.7, top_p=0.5, stream=False)
    # monkeypatch restores env automatically; the explicit delenv documents intent.
    monkeypatch.delenv("MISTRAL_API_KEY", raising=False)
def test_mistralai_llm_init_missing_api_key(monkeypatch):
    """Without MISTRAL_API_KEY in the environment, construction must fail."""
    monkeypatch.delenv("MISTRAL_API_KEY", raising=False)
    with pytest.raises(ValueError, match="Please set the MISTRAL_API_KEY environment variable."):
        MistralAILlm()
def test_mistralai_llm_init(monkeypatch):
    """With MISTRAL_API_KEY present, construction succeeds."""
    monkeypatch.setenv("MISTRAL_API_KEY", "fake_api_key")
    assert MistralAILlm() is not None
def test_get_llm_model_answer(monkeypatch, mistralai_llm_config):
    """The public API returns the patched _get_answer output unchanged."""
    monkeypatch.setattr(MistralAILlm, "_get_answer", lambda self, prompt, config: "Generated Text")
    llm = MistralAILlm(config=mistralai_llm_config)
    assert llm.get_llm_model_answer("test prompt") == "Generated Text"
def test_get_llm_model_answer_with_system_prompt(monkeypatch, mistralai_llm_config):
    """A system prompt on the config does not change the delegated answer."""
    mistralai_llm_config.system_prompt = "Test system prompt"
    monkeypatch.setattr(MistralAILlm, "_get_answer", lambda self, prompt, config: "Generated Text")
    llm = MistralAILlm(config=mistralai_llm_config)
    assert llm.get_llm_model_answer("test prompt") == "Generated Text"
def test_get_llm_model_answer_empty_prompt(monkeypatch, mistralai_llm_config):
    """An empty prompt is handled like any other and still delegated."""
    monkeypatch.setattr(MistralAILlm, "_get_answer", lambda self, prompt, config: "Generated Text")
    llm = MistralAILlm(config=mistralai_llm_config)
    assert llm.get_llm_model_answer("") == "Generated Text"
def test_get_llm_model_answer_without_system_prompt(monkeypatch, mistralai_llm_config):
    """Clearing the system prompt does not change the delegated answer."""
    mistralai_llm_config.system_prompt = None
    monkeypatch.setattr(MistralAILlm, "_get_answer", lambda self, prompt, config: "Generated Text")
    llm = MistralAILlm(config=mistralai_llm_config)
    assert llm.get_llm_model_answer("test prompt") == "Generated Text"
def test_get_llm_model_answer_with_token_usage(monkeypatch, mistralai_llm_config):
    """With token_usage on, the answer is paired with normalized cost info."""
    # Clone the fixture's config but flip token_usage on.
    test_config = BaseLlmConfig(
        temperature=mistralai_llm_config.temperature,
        max_tokens=mistralai_llm_config.max_tokens,
        top_p=mistralai_llm_config.top_p,
        model=mistralai_llm_config.model,
        token_usage=True,
    )
    # _get_answer now returns (answer, provider token counts).
    monkeypatch.setattr(
        MistralAILlm,
        "_get_answer",
        lambda self, prompt, config: ("Generated Text", {"prompt_tokens": 1, "completion_tokens": 2}),
    )
    llm = MistralAILlm(test_config)
    answer, token_info = llm.get_llm_model_answer("Test query")
    assert answer == "Generated Text"
    # Totals and a USD cost are derived from the raw counts.
    assert token_info == {
        "prompt_tokens": 1,
        "completion_tokens": 2,
        "total_tokens": 3,
        "total_cost": 7.5e-07,
        "cost_currency": "USD",
    }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_cohere.py | embedchain/tests/llm/test_cohere.py | import os
import pytest
from embedchain.config import BaseLlmConfig
from embedchain.llm.cohere import CohereLlm
@pytest.fixture
def cohere_llm_config():
    """command-r config with token accounting off and a fake COHERE_API_KEY."""
    os.environ["COHERE_API_KEY"] = "test_api_key"
    yield BaseLlmConfig(model="command-r", max_tokens=100, temperature=0.7, top_p=0.8, token_usage=False)
    os.environ.pop("COHERE_API_KEY")
def test_init_raises_value_error_without_api_key(mocker):
    """With a scrubbed environment, construction must fail fast."""
    mocker.patch.dict(os.environ, clear=True)
    with pytest.raises(ValueError):
        CohereLlm()
def test_get_llm_model_answer_raises_value_error_for_system_prompt(cohere_llm_config):
    """CohereLlm does not support system prompts and must say so."""
    llm = CohereLlm(cohere_llm_config)
    llm.config.system_prompt = "system_prompt"
    with pytest.raises(ValueError):
        llm.get_llm_model_answer("prompt")
def test_get_llm_model_answer(cohere_llm_config, mocker):
    """The public API returns the patched _get_answer output unchanged."""
    mocker.patch("embedchain.llm.cohere.CohereLlm._get_answer", return_value="Test answer")
    assert CohereLlm(cohere_llm_config).get_llm_model_answer("Test query") == "Test answer"
def test_get_llm_model_answer_with_token_usage(cohere_llm_config, mocker):
    """With token_usage on, the answer is paired with normalized cost info."""
    # Clone the fixture's config but flip token_usage on.
    test_config = BaseLlmConfig(
        temperature=cohere_llm_config.temperature,
        max_tokens=cohere_llm_config.max_tokens,
        top_p=cohere_llm_config.top_p,
        model=cohere_llm_config.model,
        token_usage=True,
    )
    # _get_answer now returns (answer, Cohere-style token counts).
    mocker.patch(
        "embedchain.llm.cohere.CohereLlm._get_answer",
        return_value=("Test answer", {"input_tokens": 1, "output_tokens": 2}),
    )
    llm = CohereLlm(test_config)
    answer, token_info = llm.get_llm_model_answer("Test query")
    assert answer == "Test answer"
    # Cohere counts are mapped to prompt/completion naming plus a USD cost.
    assert token_info == {
        "prompt_tokens": 1,
        "completion_tokens": 2,
        "total_tokens": 3,
        "total_cost": 3.5e-06,
        "cost_currency": "USD",
    }
def test_get_answer_mocked_cohere(cohere_llm_config, mocker):
    """End-to-end through get_llm_model_answer with ChatCohere stubbed out."""
    chat_stub = mocker.patch("embedchain.llm.cohere.ChatCohere")
    chat_stub.return_value.invoke.return_value.content = "Mocked answer"
    assert CohereLlm(cohere_llm_config).get_llm_model_answer("Test query") == "Mocked answer"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_anthrophic.py | embedchain/tests/llm/test_anthrophic.py | import os
from unittest.mock import patch
import pytest
from langchain.schema import HumanMessage, SystemMessage
from embedchain.config import BaseLlmConfig
from embedchain.llm.anthropic import AnthropicLlm
@pytest.fixture
def anthropic_llm():
    """AnthropicLlm with a fake API key; the key is removed again on teardown.

    Fix: the original fixture set ANTHROPIC_API_KEY and never removed it, so
    the fake key leaked into every test that ran afterwards. Converted to a
    yield fixture with cleanup, matching the sibling fixtures in
    test_cohere.py and test_jina.py.
    """
    os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
    config = BaseLlmConfig(temperature=0.5, model="claude-instant-1", token_usage=False)
    yield AnthropicLlm(config)
    os.environ.pop("ANTHROPIC_API_KEY")
def test_get_llm_model_answer(anthropic_llm):
    """The public API forwards prompt and config positionally to _get_answer."""
    prompt = "Test Prompt"
    with patch.object(AnthropicLlm, "_get_answer", return_value="Test Response") as stub:
        assert anthropic_llm.get_llm_model_answer(prompt) == "Test Response"
        stub.assert_called_once_with(prompt, anthropic_llm.config)
def test_get_messages(anthropic_llm):
    """_get_messages builds a [system, human] message pair in that order."""
    built = anthropic_llm._get_messages("Test Prompt", "Test System Prompt")
    expected = [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]
    assert built == expected
def test_get_llm_model_answer_with_token_usage(anthropic_llm):
    """With token_usage on, the answer is paired with normalized cost info."""
    # Clone the fixture's config but flip token_usage on.
    test_config = BaseLlmConfig(
        temperature=anthropic_llm.config.temperature, model=anthropic_llm.config.model, token_usage=True
    )
    anthropic_llm.config = test_config
    # _get_answer now returns (answer, Anthropic-style token counts).
    with patch.object(
        AnthropicLlm, "_get_answer", return_value=("Test Response", {"input_tokens": 1, "output_tokens": 2})
    ) as mock_method:
        prompt = "Test Prompt"
        response, token_info = anthropic_llm.get_llm_model_answer(prompt)
        assert response == "Test Response"
        # Counts are mapped to prompt/completion naming plus a USD cost.
        assert token_info == {
            "prompt_tokens": 1,
            "completion_tokens": 2,
            "total_tokens": 3,
            "total_cost": 1.265e-05,
            "cost_currency": "USD",
        }
        mock_method.assert_called_once_with(prompt, anthropic_llm.config)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_jina.py | embedchain/tests/llm/test_jina.py | import os
import pytest
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from embedchain.config import BaseLlmConfig
from embedchain.llm.jina import JinaLlm
@pytest.fixture
def config():
    """Jina chat config with a fake JINACHAT_API_KEY for the test's duration."""
    os.environ["JINACHAT_API_KEY"] = "test_api_key"
    yield BaseLlmConfig(temperature=0.7, max_tokens=50, top_p=0.8, stream=False, system_prompt="System prompt")
    os.environ.pop("JINACHAT_API_KEY")
def test_init_raises_value_error_without_api_key(mocker):
    """With a scrubbed environment, construction must fail fast."""
    mocker.patch.dict(os.environ, clear=True)
    with pytest.raises(ValueError):
        JinaLlm()
def test_get_llm_model_answer(config, mocker):
    """The public API delegates to _get_answer positionally."""
    stub = mocker.patch("embedchain.llm.jina.JinaLlm._get_answer", return_value="Test answer")
    assert JinaLlm(config).get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with("Test query", config)
def test_get_llm_model_answer_with_system_prompt(config, mocker):
    """A custom system prompt does not change the delegation contract."""
    config.system_prompt = "Custom system prompt"
    stub = mocker.patch("embedchain.llm.jina.JinaLlm._get_answer", return_value="Test answer")
    assert JinaLlm(config).get_llm_model_answer("Test query") == "Test answer"
    stub.assert_called_once_with("Test query", config)
def test_get_llm_model_answer_empty_prompt(config, mocker):
    """An empty prompt is passed through to _get_answer unchanged."""
    stub = mocker.patch("embedchain.llm.jina.JinaLlm._get_answer", return_value="Test answer")
    assert JinaLlm(config).get_llm_model_answer("") == "Test answer"
    stub.assert_called_once_with("", config)
def test_get_llm_model_answer_with_streaming(config, mocker):
    """Streaming mode must hand a StreamingStdOutCallbackHandler to JinaChat."""
    config.stream = True
    mocked_jinachat = mocker.patch("embedchain.llm.jina.JinaChat")
    llm = JinaLlm(config)
    llm.get_llm_model_answer("Test query")
    mocked_jinachat.assert_called_once()
    # Gather the `callbacks` kwarg from each constructor call; one must lead
    # with the stdout streaming handler.
    callbacks = [callback[1]["callbacks"] for callback in mocked_jinachat.call_args_list]
    assert any(isinstance(callback[0], StreamingStdOutCallbackHandler) for callback in callbacks)
def test_get_llm_model_answer_without_system_prompt(config, mocker):
    """Without a system prompt, JinaChat is built from config + env key only."""
    config.system_prompt = None
    mocked_jinachat = mocker.patch("embedchain.llm.jina.JinaChat")
    llm = JinaLlm(config)
    llm.get_llm_model_answer("Test query")
    # top_p travels inside model_kwargs rather than as a first-class argument.
    mocked_jinachat.assert_called_once_with(
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        jinachat_api_key=os.environ["JINACHAT_API_KEY"],
        model_kwargs={"top_p": config.top_p},
    )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/llm/test_ollama.py | embedchain/tests/llm/test_ollama.py | import pytest
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from embedchain.config import BaseLlmConfig
from embedchain.llm.ollama import OllamaLlm
@pytest.fixture
def ollama_llm_config():
    """Streaming llama2 config with no system prompt."""
    yield BaseLlmConfig(model="llama2", temperature=0.7, top_p=0.8, stream=True, system_prompt=None)
def test_get_llm_model_answer(ollama_llm_config, mocker):
    """With the model present locally, the public API returns _get_answer's value."""
    # Pretend llama2 is already pulled so init does not try to download it.
    mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
    mocker.patch("embedchain.llm.ollama.OllamaLlm._get_answer", return_value="Test answer")
    assert OllamaLlm(ollama_llm_config).get_llm_model_answer("Test query") == "Test answer"
def test_get_answer_mocked_ollama(ollama_llm_config, mocker):
    """End-to-end through get_llm_model_answer with the Ollama backend stubbed."""
    # Pretend llama2 is already pulled so init does not try to download it.
    mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
    mocked_ollama = mocker.patch("embedchain.llm.ollama.Ollama")
    mock_instance = mocked_ollama.return_value
    mock_instance.invoke.return_value = "Mocked answer"
    llm = OllamaLlm(ollama_llm_config)
    prompt = "Test query"
    answer = llm.get_llm_model_answer(prompt)
    assert answer == "Mocked answer"
def test_get_llm_model_answer_with_streaming(ollama_llm_config, mocker):
    """Streaming callbacks from the config must reach _get_answer intact."""
    ollama_llm_config.stream = True
    ollama_llm_config.callbacks = [StreamingStdOutCallbackHandler()]
    # Pretend llama2 is already pulled so init does not try to download it.
    mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
    mocked_ollama_chat = mocker.patch("embedchain.llm.ollama.OllamaLlm._get_answer", return_value="Test answer")
    llm = OllamaLlm(ollama_llm_config)
    llm.get_llm_model_answer("Test query")
    mocked_ollama_chat.assert_called_once()
    # Inspect the config forwarded to _get_answer: exactly our streaming handler.
    call_args = mocked_ollama_chat.call_args
    config_arg = call_args[1]["config"]
    callbacks = config_arg.callbacks
    assert len(callbacks) == 1
    assert isinstance(callbacks[0], StreamingStdOutCallbackHandler)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/evaluation/test_groundedness_metric.py | embedchain/tests/evaluation/test_groundedness_metric.py | import numpy as np
import pytest
from embedchain.config.evaluation.base import GroundednessConfig
from embedchain.evaluation.metrics import Groundedness
from embedchain.utils.evaluation import EvalData, EvalMetric
@pytest.fixture
def mock_data():
    """Two EvalData samples: one single-context, one multi-context."""
    first = EvalData(
        contexts=["This is a test context 1."],
        question="This is a test question 1.",
        answer="This is a test answer 1.",
    )
    second = EvalData(
        contexts=["This is a test context 2-1.", "This is a test context 2-2."],
        question="This is a test question 2.",
        answer="This is a test answer 2.",
    )
    return [first, second]
@pytest.fixture
def mock_groundedness_metric(monkeypatch):
    """A Groundedness metric constructed with a fake OpenAI key in the env."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    return Groundedness()
def test_groundedness_init(monkeypatch):
    """Default construction reads the key from the env and targets gpt-4."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    metric = Groundedness()
    assert metric.name == EvalMetric.GROUNDEDNESS.value
    assert metric.config.model == "gpt-4"
    # The key stays in the environment rather than on the config object.
    assert metric.config.api_key is None
    monkeypatch.delenv("OPENAI_API_KEY")
def test_groundedness_init_with_config():
    """An explicit config may carry the API key instead of the environment."""
    metric = Groundedness(config=GroundednessConfig(api_key="test_api_key"))
    assert metric.name == EvalMetric.GROUNDEDNESS.value
    assert metric.config.model == "gpt-4"
    assert metric.config.api_key == "test_api_key"
def test_groundedness_init_without_api_key(monkeypatch):
    """Without OPENAI_API_KEY or an explicit key, construction must fail."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    with pytest.raises(ValueError):
        Groundedness()
def test_generate_answer_claim_prompt(mock_groundedness_metric, mock_data):
    """The answer-claim prompt embeds both the question and the answer."""
    prompt = mock_groundedness_metric._generate_answer_claim_prompt(data=mock_data[0])
    for expected in ("This is a test question 1.", "This is a test answer 1."):
        assert expected in prompt
def test_get_claim_statements(mock_groundedness_metric, mock_data, monkeypatch):
    """The chat completion's answer text is split into one claim per line."""
    # Fake the OpenAI chat API with an anonymous object graph exposing
    # .choices[0].message.content that holds three newline-separated claims.
    monkeypatch.setattr(
        mock_groundedness_metric.client.chat.completions,
        "create",
        lambda *args, **kwargs: type(
            "obj",
            (object,),
            {
                "choices": [
                    type(
                        "obj",
                        (object,),
                        {
                            "message": type(
                                "obj",
                                (object,),
                                {
                                    "content": """This is a test answer 1.
                        This is a test answer 2.
                        This is a test answer 3."""
                                },
                            )
                        },
                    )
                ]
            },
        )(),
    )
    prompt = mock_groundedness_metric._generate_answer_claim_prompt(data=mock_data[0])
    claim_statements = mock_groundedness_metric._get_claim_statements(prompt=prompt)
    # Leading whitespace on the continuation lines is stripped by the parser.
    assert len(claim_statements) == 3
    assert "This is a test answer 1." in claim_statements
def test_generate_claim_inference_prompt(mock_groundedness_metric, mock_data):
    """Claim-inference prompt embeds both the contexts and the claim statements.

    Note: the original test computed an answer-claim prompt first and then
    immediately overwrote the variable; that dead assignment was removed.
    """
    claim_statements = [
        "This is a test claim 1.",
        "This is a test claim 2.",
    ]
    prompt = mock_groundedness_metric._generate_claim_inference_prompt(
        data=mock_data[0], claim_statements=claim_statements
    )
    assert "This is a test context 1." in prompt
    assert "This is a test claim 1." in prompt
def test_get_claim_verdict_scores(mock_groundedness_metric, mock_data, monkeypatch):
    """Verdict parsing maps the canned '1\\n0\\n-1' completion to three scores."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    monkeypatch.setattr(
        mock_groundedness_metric.client.chat.completions,
        "create",
        lambda *args, **kwargs: _fake_completion("1\n0\n-1"),
    )
    claim_prompt = mock_groundedness_metric._generate_answer_claim_prompt(data=mock_data[0])
    statements = mock_groundedness_metric._get_claim_statements(prompt=claim_prompt)
    inference_prompt = mock_groundedness_metric._generate_claim_inference_prompt(
        data=mock_data[0], claim_statements=statements
    )
    verdicts = mock_groundedness_metric._get_claim_verdict_scores(prompt=inference_prompt)
    assert len(verdicts) == 3
    assert verdicts[0] == 1
    assert verdicts[1] == 0
def test_compute_score(mock_groundedness_metric, mock_data, monkeypatch):
    """Score is the mean of the claim verdicts: (1 + 0) / 2 == 0.5."""
    fake_claims = np.array(["This is a test claim 1.", "This is a test claim 2."])
    monkeypatch.setattr(mock_groundedness_metric, "_get_claim_statements", lambda *a, **k: fake_claims)
    monkeypatch.setattr(mock_groundedness_metric, "_get_claim_verdict_scores", lambda *a, **k: np.array([1, 0]))
    assert mock_groundedness_metric._compute_score(data=mock_data[0]) == 0.5


def test_evaluate(mock_groundedness_metric, mock_data, monkeypatch):
    """evaluate averages the per-datum scores."""
    monkeypatch.setattr(mock_groundedness_metric, "_compute_score", lambda *a, **k: 0.5)
    assert mock_groundedness_metric.evaluate(dataset=mock_data) == 0.5
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/evaluation/test_context_relevancy_metric.py | embedchain/tests/evaluation/test_context_relevancy_metric.py | import pytest
from embedchain.config.evaluation.base import ContextRelevanceConfig
from embedchain.evaluation.metrics import ContextRelevance
from embedchain.utils.evaluation import EvalData, EvalMetric
@pytest.fixture
def mock_data():
    """Two canned EvalData rows: one single-context, one dual-context."""
    first = EvalData(
        contexts=["This is a test context 1."],
        question="This is a test question 1.",
        answer="This is a test answer 1.",
    )
    second = EvalData(
        contexts=["This is a test context 2-1.", "This is a test context 2-2."],
        question="This is a test question 2.",
        answer="This is a test answer 2.",
    )
    return [first, second]
@pytest.fixture
def mock_context_relevance_metric(monkeypatch):
    """ContextRelevance metric constructed with a dummy API key in the env."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    return ContextRelevance()


def test_context_relevance_init(monkeypatch):
    """Env-key construction: gpt-4 model, English language, no explicit api_key."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    metric = ContextRelevance()
    assert metric.config.api_key is None
    assert metric.config.language == "en"
    assert metric.config.model == "gpt-4"
    assert metric.name == EvalMetric.CONTEXT_RELEVANCY.value
    monkeypatch.delenv("OPENAI_API_KEY")


def test_context_relevance_init_with_config():
    """Explicit config: the api_key comes from the config object."""
    metric = ContextRelevance(config=ContextRelevanceConfig(api_key="test_api_key"))
    assert metric.config.api_key == "test_api_key"
    assert metric.config.language == "en"
    assert metric.config.model == "gpt-4"
    assert metric.name == EvalMetric.CONTEXT_RELEVANCY.value


def test_context_relevance_init_without_api_key(monkeypatch):
    """With no key in env or config, construction must fail."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    with pytest.raises(ValueError):
        ContextRelevance()
def test_sentence_segmenter(mock_context_relevance_metric):
    """The segmenter splits on sentence ends, keeping the trailing space."""
    segments = mock_context_relevance_metric._sentence_segmenter(
        "This is a test sentence. This is another sentence."
    )
    assert segments == ["This is a test sentence. ", "This is another sentence."]
def test_compute_score(mock_context_relevance_metric, mock_data, monkeypatch):
    """One mocked relevant sentence over 1 resp. 2 contexts -> 1.0 and 0.5."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    # NOTE: the "reponse" typo is kept from the original canned payload;
    # the content itself is arbitrary for this test.
    monkeypatch.setattr(
        mock_context_relevance_metric.client.chat.completions,
        "create",
        lambda model, messages: _fake_completion("This is a test reponse."),
    )
    assert mock_context_relevance_metric._compute_score(mock_data[0]) == 1.0
    assert mock_context_relevance_metric._compute_score(mock_data[1]) == 0.5
def test_evaluate(mock_context_relevance_metric, mock_data, monkeypatch):
    """evaluate averages the per-datum scores: (1.0 + 0.5) / 2 == 0.75."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    monkeypatch.setattr(
        mock_context_relevance_metric.client.chat.completions,
        "create",
        lambda model, messages: _fake_completion("This is a test reponse."),
    )
    assert mock_context_relevance_metric.evaluate(mock_data) == 0.75
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/evaluation/test_answer_relevancy_metric.py | embedchain/tests/evaluation/test_answer_relevancy_metric.py | import numpy as np
import pytest
from embedchain.config.evaluation.base import AnswerRelevanceConfig
from embedchain.evaluation.metrics import AnswerRelevance
from embedchain.utils.evaluation import EvalData, EvalMetric
@pytest.fixture
def mock_data():
    """Two canned EvalData rows: one single-context, one dual-context."""
    first = EvalData(
        contexts=["This is a test context 1."],
        question="This is a test question 1.",
        answer="This is a test answer 1.",
    )
    second = EvalData(
        contexts=["This is a test context 2-1.", "This is a test context 2-2."],
        question="This is a test question 2.",
        answer="This is a test answer 2.",
    )
    return [first, second]
@pytest.fixture
def mock_answer_relevance_metric(monkeypatch):
    """AnswerRelevance metric built with dummy OpenAI env credentials."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    monkeypatch.setenv("OPENAI_API_BASE", "test_api_base")
    return AnswerRelevance()


def test_answer_relevance_init(monkeypatch):
    """Env-key construction: gpt-4 + ada-002 embedder, one generated question."""
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    metric = AnswerRelevance()
    assert metric.config.api_key is None
    assert metric.config.embedder == "text-embedding-ada-002"
    assert metric.config.model == "gpt-4"
    assert metric.config.num_gen_questions == 1
    assert metric.name == EvalMetric.ANSWER_RELEVANCY.value
    monkeypatch.delenv("OPENAI_API_KEY")


def test_answer_relevance_init_with_config():
    """Explicit config: the api_key comes from the config object."""
    metric = AnswerRelevance(config=AnswerRelevanceConfig(api_key="test_api_key"))
    assert metric.config.api_key == "test_api_key"
    assert metric.config.embedder == "text-embedding-ada-002"
    assert metric.config.model == "gpt-4"
    assert metric.config.num_gen_questions == 1
    assert metric.name == EvalMetric.ANSWER_RELEVANCY.value


def test_answer_relevance_init_without_api_key(monkeypatch):
    """With no key in env or config, construction must fail."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    with pytest.raises(ValueError):
        AnswerRelevance()
def test_generate_prompt(mock_answer_relevance_metric, mock_data):
    """Each generated prompt embeds the corresponding answer text."""
    expected_answers = ("This is a test answer 1.", "This is a test answer 2.")
    for datum, answer in zip(mock_data, expected_answers):
        assert answer in mock_answer_relevance_metric._generate_prompt(datum)
def test_generate_questions(mock_answer_relevance_metric, mock_data, monkeypatch):
    """One line in the mocked completion -> one question; two lines -> two."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    def _patch_completion(text):
        monkeypatch.setattr(
            mock_answer_relevance_metric.client.chat.completions,
            "create",
            lambda model, messages: _fake_completion(text),
        )

    _patch_completion("This is a test question response.\n")
    prompt = mock_answer_relevance_metric._generate_prompt(mock_data[0])
    assert len(mock_answer_relevance_metric._generate_questions(prompt)) == 1

    _patch_completion("question 1?\nquestion2?")
    prompt = mock_answer_relevance_metric._generate_prompt(mock_data[1])
    assert len(mock_answer_relevance_metric._generate_questions(prompt)) == 2
def test_generate_embedding(mock_answer_relevance_metric, mock_data, monkeypatch):
    """The embedding endpoint's vector is passed through unchanged."""
    fake_item = type("item", (object,), {"embedding": [1, 2, 3]})
    monkeypatch.setattr(
        mock_answer_relevance_metric.client.embeddings,
        "create",
        lambda input, model: type("response", (object,), {"data": [fake_item]})(),
    )
    embedding = mock_answer_relevance_metric._generate_embedding("This is a test question.")
    assert len(embedding) == 3


def test_compute_similarity(mock_answer_relevance_metric, mock_data):
    """Cosine similarity of a vector with copies of itself is 1.0 per row."""
    original = np.array([1, 2, 3])
    generated = np.array([[1, 2, 3], [1, 2, 3]])
    similarity = mock_answer_relevance_metric._compute_similarity(original, generated)
    assert len(similarity) == 2
    assert similarity[0] == 1.0
    assert similarity[1] == 1.0
def test_compute_score(mock_answer_relevance_metric, mock_data, monkeypatch):
    """Identical embeddings for the answer and generated questions give 1.0."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    def _patch_clients(completion_text):
        # Both the chat and the embeddings endpoints are stubbed.
        monkeypatch.setattr(
            mock_answer_relevance_metric.client.chat.completions,
            "create",
            lambda model, messages: _fake_completion(completion_text),
        )
        fake_item = type("item", (object,), {"embedding": [1, 2, 3]})
        monkeypatch.setattr(
            mock_answer_relevance_metric.client.embeddings,
            "create",
            lambda input, model: type("response", (object,), {"data": [fake_item]})(),
        )

    _patch_clients("This is a test question response.\n")
    assert mock_answer_relevance_metric._compute_score(mock_data[0]) == 1.0

    _patch_clients("question 1?\nquestion2?")
    assert mock_answer_relevance_metric._compute_score(mock_data[1]) == 1.0
def test_evaluate(mock_answer_relevance_metric, mock_data, monkeypatch):
    """evaluate yields 1.0 when every generated question embeds identically."""

    def _fake_completion(text):
        message = type("message", (object,), {"content": text})
        choice = type("choice", (object,), {"message": message})
        return type("completion", (object,), {"choices": [choice]})()

    def _patch_clients(completion_text):
        monkeypatch.setattr(
            mock_answer_relevance_metric.client.chat.completions,
            "create",
            lambda model, messages: _fake_completion(completion_text),
        )
        fake_item = type("item", (object,), {"embedding": [1, 2, 3]})
        monkeypatch.setattr(
            mock_answer_relevance_metric.client.embeddings,
            "create",
            lambda input, model: type("response", (object,), {"data": [fake_item]})(),
        )

    _patch_clients("This is a test question response.\n")
    assert mock_answer_relevance_metric.evaluate(mock_data) == 1.0

    _patch_clients("question 1?\nquestion2?")
    assert mock_answer_relevance_metric.evaluate(mock_data) == 1.0
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedder/test_aws_bedrock_embedder.py | embedchain/tests/embedder/test_aws_bedrock_embedder.py | from unittest.mock import patch
from embedchain.config.embedder.aws_bedrock import AWSBedrockEmbedderConfig
from embedchain.embedder.aws_bedrock import AWSBedrockEmbedder
def test_aws_bedrock_embedder_with_model():
    """Config values are stored on the embedder and forwarded to BedrockEmbeddings."""
    config = AWSBedrockEmbedderConfig(
        model="test-model",
        model_kwargs={"param": "value"},
        vector_dimension=1536,
    )
    with patch("embedchain.embedder.aws_bedrock.BedrockEmbeddings") as mock_embeddings:
        embedder = AWSBedrockEmbedder(config=config)
    assert embedder.config.model == "test-model"
    assert embedder.config.model_kwargs == {"param": "value"}
    assert embedder.config.vector_dimension == 1536
    mock_embeddings.assert_called_once_with(model_id="test-model", model_kwargs={"param": "value"})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedder/test_huggingface_embedder.py | embedchain/tests/embedder/test_huggingface_embedder.py |
from unittest.mock import patch
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.huggingface import HuggingFaceEmbedder
def test_huggingface_embedder_with_model():
    """Config values are stored and forwarded to HuggingFaceEmbeddings.

    The `monkeypatch` fixture parameter was unused and has been removed.
    """
    config = BaseEmbedderConfig(model="test-model", model_kwargs={"param": "value"})
    with patch("embedchain.embedder.huggingface.HuggingFaceEmbeddings") as mock_embeddings:
        embedder = HuggingFaceEmbedder(config=config)
    assert embedder.config.model == "test-model"
    assert embedder.config.model_kwargs == {"param": "value"}
    mock_embeddings.assert_called_once_with(model_name="test-model", model_kwargs={"param": "value"})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedder/test_embedder.py | embedchain/tests/embedder/test_embedder.py | import pytest
from chromadb.api.types import Documents, Embeddings
from embedchain.config.embedder.base import BaseEmbedderConfig
from embedchain.embedder.base import BaseEmbedder
@pytest.fixture
def base_embedder():
    """A bare BaseEmbedder built with the default config."""
    return BaseEmbedder()
def test_initialization(base_embedder):
    """A fresh embedder has a default config and no fn/dimension yet."""
    assert isinstance(base_embedder.config, BaseEmbedderConfig)
    # Neither attribute exists until explicitly set.
    assert not hasattr(base_embedder, "embedding_fn")
    assert not hasattr(base_embedder, "vector_dimension")


def test_set_embedding_fn(base_embedder):
    """A callable is stored and used verbatim."""

    def embedding_function(texts: Documents) -> Embeddings:
        return [f"Embedding for {text}" for text in texts]

    base_embedder.set_embedding_fn(embedding_function)
    assert hasattr(base_embedder, "embedding_fn")
    assert callable(base_embedder.embedding_fn)
    assert base_embedder.embedding_fn(["text1", "text2"]) == [
        "Embedding for text1",
        "Embedding for text2",
    ]


def test_set_embedding_fn_when_not_a_function(base_embedder):
    """Non-callables are rejected."""
    with pytest.raises(ValueError):
        base_embedder.set_embedding_fn(None)


def test_set_vector_dimension(base_embedder):
    """The dimension is stored as given."""
    base_embedder.set_vector_dimension(256)
    assert hasattr(base_embedder, "vector_dimension")
    assert base_embedder.vector_dimension == 256


def test_set_vector_dimension_type_error(base_embedder):
    """Non-int dimensions are rejected."""
    with pytest.raises(TypeError):
        base_embedder.set_vector_dimension(None)


def test_embedder_with_config():
    """An explicit config object is accepted and stored."""
    assert isinstance(BaseEmbedder(BaseEmbedderConfig()).config, BaseEmbedderConfig)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/embedder/test_azure_openai_embedder.py | embedchain/tests/embedder/test_azure_openai_embedder.py | from unittest.mock import Mock, patch
import httpx
from embedchain.config import BaseEmbedderConfig
from embedchain.embedder.azure_openai import AzureOpenAIEmbedder
def test_azure_openai_embedder_with_http_client(monkeypatch):
    """http_client_proxies builds a sync httpx.Client passed to the embeddings."""
    client_cls = Mock(spec=httpx.Client)
    client_instance = Mock(spec=httpx.Client)
    client_cls.return_value = client_instance
    with patch("embedchain.embedder.azure_openai.AzureOpenAIEmbeddings") as mock_embeddings:
        with patch("httpx.Client", new=client_cls):
            config = BaseEmbedderConfig(
                deployment_name="text-embedding-ada-002",
                http_client_proxies="http://testproxy.mem0.net:8000",
            )
            _ = AzureOpenAIEmbedder(config=config)
    mock_embeddings.assert_called_once_with(
        deployment="text-embedding-ada-002",
        http_client=client_instance,
        http_async_client=None,
    )
    client_cls.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
def test_azure_openai_embedder_with_http_async_client(monkeypatch):
    """http_async_client_proxies builds an httpx.AsyncClient for the embeddings."""
    async_client_cls = Mock(spec=httpx.AsyncClient)
    async_client_instance = Mock(spec=httpx.AsyncClient)
    async_client_cls.return_value = async_client_instance
    with patch("embedchain.embedder.azure_openai.AzureOpenAIEmbeddings") as mock_embeddings:
        with patch("httpx.AsyncClient", new=async_client_cls):
            config = BaseEmbedderConfig(
                deployment_name="text-embedding-ada-002",
                http_async_client_proxies={"http://": "http://testproxy.mem0.net:8000"},
            )
            _ = AzureOpenAIEmbedder(config=config)
    mock_embeddings.assert_called_once_with(
        deployment="text-embedding-ada-002",
        http_client=None,
        http_async_client=async_client_instance,
    )
    async_client_cls.assert_called_once_with(proxies={"http://": "http://testproxy.mem0.net:8000"})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/telemetry/test_posthog.py | embedchain/tests/telemetry/test_posthog.py | import logging
import os
from embedchain.telemetry.posthog import AnonymousTelemetry
class TestAnonymousTelemetry:
    """Tests for AnonymousTelemetry.

    EC_TELEMETRY is patched with ``mocker.patch.dict`` so every test restores
    the process environment on teardown. The original tests wrote to
    ``os.environ`` directly, leaking state into later tests and making the
    "disabled" test depend on test ordering / ambient environment.
    """

    def test_init(self, mocker):
        """Enabled telemetry wires Posthog with the hard-coded key and host."""
        mocker.patch.dict(os.environ, {"EC_TELEMETRY": "true"})
        mock_posthog = mocker.patch("embedchain.telemetry.posthog.Posthog")
        telemetry = AnonymousTelemetry()
        assert telemetry.project_api_key == "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"
        assert telemetry.host == "https://app.posthog.com"
        assert telemetry.enabled is True
        assert telemetry.user_id
        mock_posthog.assert_called_once_with(project_api_key=telemetry.project_api_key, host=telemetry.host)

    def test_init_with_disabled_telemetry(self, mocker):
        """A falsy EC_TELEMETRY value disables the Posthog client."""
        # Disable explicitly instead of relying on ambient environment state.
        mocker.patch.dict(os.environ, {"EC_TELEMETRY": "false"})
        mocker.patch("embedchain.telemetry.posthog.Posthog")
        telemetry = AnonymousTelemetry()
        assert telemetry.enabled is False
        assert telemetry.posthog.disabled is True

    def test_get_user_id(self, mocker, tmpdir):
        """The generated user id is persisted to the config file as JSON."""
        mock_uuid = mocker.patch("embedchain.telemetry.posthog.uuid.uuid4")
        mock_uuid.return_value = "unique_user_id"
        config_file = tmpdir.join("config.json")
        mocker.patch("embedchain.telemetry.posthog.CONFIG_FILE", str(config_file))
        telemetry = AnonymousTelemetry()
        user_id = telemetry._get_user_id()
        assert user_id == "unique_user_id"
        assert config_file.read() == '{"user_id": "unique_user_id"}'

    def test_capture(self, mocker):
        """capture forwards user id, event name, and the properties dict."""
        mocker.patch.dict(os.environ, {"EC_TELEMETRY": "true"})
        mock_posthog = mocker.patch("embedchain.telemetry.posthog.Posthog")
        telemetry = AnonymousTelemetry()
        event_name = "test_event"
        # capture() updates this dict in place with default properties, so the
        # same object must be used in the assertion below.
        properties = {"key": "value"}
        telemetry.capture(event_name, properties)
        mock_posthog.assert_called_once_with(
            project_api_key=telemetry.project_api_key,
            host=telemetry.host,
        )
        mock_posthog.return_value.capture.assert_called_once_with(
            telemetry.user_id,
            event_name,
            properties,
        )

    def test_capture_with_exception(self, mocker, caplog):
        """Posthog failures are swallowed and logged, never raised."""
        mocker.patch.dict(os.environ, {"EC_TELEMETRY": "true"})
        mock_posthog = mocker.patch("embedchain.telemetry.posthog.Posthog")
        mock_posthog.return_value.capture.side_effect = Exception("Test Exception")
        telemetry = AnonymousTelemetry()
        with caplog.at_level(logging.ERROR):
            telemetry.capture("test_event", {"key": "value"})
        assert "Failed to send telemetry event" in caplog.text
        caplog.clear()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_local_qna_pair.py | embedchain/tests/loaders/test_local_qna_pair.py | import hashlib
import pytest
from embedchain.loaders.local_qna_pair import LocalQnaPairLoader
@pytest.fixture
def qna_pair_loader():
    """A plain LocalQnaPairLoader instance."""
    return LocalQnaPairLoader()


def test_load_data(qna_pair_loader):
    """Q/A pairs render as 'Q: ...\\nA: ...' with a sha256(content+url) doc id."""
    question = "What is the capital of France?"
    answer = "The capital of France is Paris."
    result = qna_pair_loader.load_data((question, answer))
    assert "doc_id" in result
    assert "data" in result
    source = "local"
    rendered = f"Q: {question}\nA: {answer}"
    entry = result["data"][0]
    assert entry["content"] == rendered
    assert entry["meta_data"]["url"] == source
    assert entry["meta_data"]["question"] == question
    assert result["doc_id"] == hashlib.sha256((rendered + source).encode()).hexdigest()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_postgres.py | embedchain/tests/loaders/test_postgres.py | from unittest.mock import MagicMock
import psycopg
import pytest
from embedchain.loaders.postgres import PostgresLoader
@pytest.fixture
def postgres_loader(mocker):
    """PostgresLoader with psycopg.connect stubbed out.

    pytest-mock patches are function-scoped and must not be used as context
    managers (the original wrapped the patch in a ``with`` block), so the
    patch is applied first and the loader built afterwards.
    """
    mocker.patch.object(psycopg, "connect")
    config = {"url": "postgres://user:password@localhost:5432/database"}
    yield PostgresLoader(config=config)
def test_postgres_loader_initialization(postgres_loader):
    """Constructing the loader opens a connection and a cursor."""
    assert postgres_loader.connection is not None
    assert postgres_loader.cursor is not None


def test_postgres_loader_invalid_config():
    """A missing config is rejected with a descriptive error."""
    with pytest.raises(ValueError, match="Must provide the valid config. Received: None"):
        PostgresLoader(config=None)
def test_load_data(postgres_loader, monkeypatch):
    """load_data wraps each fetched row and records the query as the url."""
    mock_cursor = MagicMock()
    monkeypatch.setattr(postgres_loader, "cursor", mock_cursor)
    query = "SELECT * FROM table"
    mock_cursor.fetchall.return_value = [(1, "data1"), (2, "data2")]
    result = postgres_loader.load_data(query)
    assert "doc_id" in result
    assert "data" in result
    assert len(result["data"]) == 2
    assert result["data"][0]["meta_data"]["url"] == query
    assert result["data"][1]["meta_data"]["url"] == query
    # Bug fix: the original `assert mock_cursor.execute.called_with(query)`
    # called a non-existent Mock method, which returns a truthy child Mock
    # and therefore never verified anything.
    mock_cursor.execute.assert_called_with(query)
def test_load_data_exception(postgres_loader, monkeypatch):
    """Cursor failures surface as a ValueError naming the query."""
    failing_cursor = MagicMock()
    failing_cursor.execute.side_effect = Exception("Mocked exception")
    monkeypatch.setattr(postgres_loader, "cursor", failing_cursor)
    with pytest.raises(
        ValueError, match=r"Failed to load data using query=SELECT \* FROM table with: Mocked exception"
    ):
        postgres_loader.load_data("SELECT * FROM table")


def test_close_connection(postgres_loader):
    """close_connection clears both the cursor and the connection."""
    postgres_loader.close_connection()
    assert postgres_loader.cursor is None
    assert postgres_loader.connection is None
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_gmail.py | embedchain/tests/loaders/test_gmail.py | import pytest
from embedchain.loaders.gmail import GmailLoader
@pytest.fixture
def mock_beautifulsoup(mocker):
    """Patch BeautifulSoup inside the gmail loader module."""
    return mocker.patch("embedchain.loaders.gmail.BeautifulSoup", return_value=mocker.MagicMock())


@pytest.fixture
def gmail_loader(mock_beautifulsoup):
    """A GmailLoader whose HTML parsing is stubbed out."""
    return GmailLoader()
def test_load_data_file_not_found(gmail_loader, mocker):
    """load_data raises when the credentials file is missing.

    The mocker patch is applied before entering pytest.raises; pytest-mock
    patches are function-scoped and must not be used as context managers
    (the original nested `with mocker.patch(...)` inside `pytest.raises`).
    """
    mocker.patch("os.path.isfile", return_value=False)
    with pytest.raises(FileNotFoundError):
        gmail_loader.load_data("your_query")
@pytest.mark.skip(reason="TODO: Fix this test. Failing due to some googleapiclient import issue.")
def test_load_data(gmail_loader, mocker):
    """(Currently skipped) load_data wraps the reader output into doc_id/data."""
    text = "your_test_email_text"
    metadata = {"id": "your_test_id", "snippet": "your_test_snippet"}
    reader = mocker.MagicMock()
    reader.load_data.return_value = [{"text": text, "extra_info": metadata}]
    with mocker.patch("os.path.isfile", return_value=True):
        response_data = gmail_loader.load_data("your_query")
        assert "doc_id" in response_data
        assert "data" in response_data
        assert isinstance(response_data["doc_id"], str)
        assert isinstance(response_data["data"], list)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_mysql.py | embedchain/tests/loaders/test_mysql.py | import hashlib
from unittest.mock import MagicMock
import pytest
from embedchain.loaders.mysql import MySQLLoader
@pytest.fixture
def mysql_loader(mocker):
    """MySQLLoader with the MySQL connection class stubbed out.

    pytest-mock patches are function-scoped and must not be used as context
    managers (the original wrapped the patch in a ``with`` block), so the
    patch is applied first and the loader built afterwards.
    """
    mocker.patch("mysql.connector.connection.MySQLConnection")
    config = {
        "host": "localhost",
        "port": "3306",
        "user": "your_username",
        "password": "your_password",
        "database": "your_database",
    }
    yield MySQLLoader(config=config)
def test_mysql_loader_initialization(mysql_loader):
    """Construction stores the config and opens connection + cursor."""
    assert mysql_loader.config is not None
    assert mysql_loader.connection is not None
    assert mysql_loader.cursor is not None


def test_mysql_loader_invalid_config():
    """A missing config is rejected."""
    with pytest.raises(ValueError, match="Invalid sql config: None"):
        MySQLLoader(config=None)


def test_mysql_loader_setup_loader_successful(mysql_loader):
    """The fixture's loader came up with a live connection and cursor."""
    assert mysql_loader.connection is not None
    assert mysql_loader.cursor is not None


def test_mysql_loader_setup_loader_connection_error(mysql_loader, mocker):
    """Connection failures surface as ValueError."""
    mocker.patch("mysql.connector.connection.MySQLConnection", side_effect=IOError("Mocked connection error"))
    with pytest.raises(ValueError, match="Unable to connect with the given config:"):
        mysql_loader._setup_loader(config={})


def test_mysql_loader_check_query_successful(mysql_loader):
    """A plain string query passes validation without raising."""
    mysql_loader._check_query(query="SELECT * FROM table")


def test_mysql_loader_check_query_invalid(mysql_loader):
    """Non-string queries are rejected."""
    with pytest.raises(ValueError, match="Invalid mysql query: 123"):
        mysql_loader._check_query(query=123)
def test_mysql_loader_load_data_successful(mysql_loader, mocker):
    """load_data wraps each row, records the query as url, and hashes a doc id."""
    mock_cursor = MagicMock()
    mocker.patch.object(mysql_loader, "cursor", mock_cursor)
    mock_cursor.fetchall.return_value = [(1, "data1"), (2, "data2")]
    query = "SELECT * FROM table"
    result = mysql_loader.load_data(query)
    assert "doc_id" in result
    assert "data" in result
    assert len(result["data"]) == 2
    assert result["data"][0]["meta_data"]["url"] == query
    assert result["data"][1]["meta_data"]["url"] == query
    doc_id = hashlib.sha256((query + ", ".join([d["content"] for d in result["data"]])).encode()).hexdigest()
    assert result["doc_id"] == doc_id
    # Bug fix: the original `assert mock_cursor.execute.called_with(query)`
    # called a non-existent Mock method (truthy child Mock) and never
    # verified the call.
    mock_cursor.execute.assert_called_with(query)
def test_mysql_loader_load_data_invalid_query(mysql_loader):
    """Non-string queries are rejected by load_data as well."""
    with pytest.raises(ValueError, match="Invalid mysql query: 123"):
        mysql_loader.load_data(query=123)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_openapi.py | embedchain/tests/loaders/test_openapi.py | import pytest
from embedchain.loaders.openapi import OpenAPILoader
@pytest.fixture
def openapi_loader():
    """A plain OpenAPILoader instance."""
    return OpenAPILoader()


def test_load_data(openapi_loader, mocker):
    """Each YAML line becomes one entry carrying its 1-based row number."""
    mocker.patch("builtins.open", mocker.mock_open(read_data="key1: value1\nkey2: value2"))
    mocker.patch("hashlib.sha256", return_value=mocker.Mock(hexdigest=lambda: "mock_hash"))
    file_path = "configs/openai_openapi.yaml"
    result = openapi_loader.load_data(file_path)
    assert result["doc_id"] == "mock_hash"
    assert result["data"] == [
        {"content": "key1: value1", "meta_data": {"url": file_path, "row": 1}},
        {"content": "key2: value2", "meta_data": {"url": file_path, "row": 2}},
    ]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_web_page.py | embedchain/tests/loaders/test_web_page.py | import hashlib
from unittest.mock import Mock, patch
import pytest
import requests
from embedchain.loaders.web_page import WebPageLoader
@pytest.fixture
def web_page_loader():
    """A plain WebPageLoader instance."""
    return WebPageLoader()
def test_load_data(web_page_loader):
    """load_data fetches the page, cleans the HTML, and hashes content+url."""
    page_url = "https://example.com/page"
    mock_response = Mock()
    mock_response.status_code = 200
    mock_response.content = """
    <html>
    <head>
        <title>Test Page</title>
    </head>
    <body>
        <div id="content">
            <p>This is some test content.</p>
        </div>
    </body>
    </html>
    """
    # The loader fetches via a module-level session, so patch its get().
    with patch("embedchain.loaders.web_page.WebPageLoader._session.get", return_value=mock_response):
        result = web_page_loader.load_data(page_url)

    # The expected content is computed from the same HTML the loader saw,
    # so the test only pins the doc_id formula and the data envelope.
    content = web_page_loader._get_clean_content(mock_response.content, page_url)

    # doc_id is sha256 of cleaned content concatenated with the url.
    expected_doc_id = hashlib.sha256((content + page_url).encode()).hexdigest()
    assert result["doc_id"] == expected_doc_id

    expected_data = [
        {
            "content": content,
            "meta_data": {
                "url": page_url,
            },
        }
    ]
    assert result["data"] == expected_data
def test_get_clean_content_excludes_unnecessary_info(web_page_loader):
    """_get_clean_content strips boilerplate tags, ids, and classes."""
    mock_html = """
    <html>
    <head>
        <title>Sample HTML</title>
        <style>
            /* Stylesheet to be excluded */
            .elementor-location-header {
                background-color: #f0f0f0;
            }
        </style>
    </head>
    <body>
        <header id="header">Header Content</header>
        <nav class="nav">Nav Content</nav>
        <aside>Aside Content</aside>
        <form>Form Content</form>
        <main>Main Content</main>
        <footer class="footer">Footer Content</footer>
        <script>Some Script</script>
        <noscript>NoScript Content</noscript>
        <svg>SVG Content</svg>
        <canvas>Canvas Content</canvas>
        <div id="sidebar">Sidebar Content</div>
        <div id="main-navigation">Main Navigation Content</div>
        <div id="menu-main-menu">Menu Main Menu Content</div>
        <div class="header-sidebar-wrapper">Header Sidebar Wrapper Content</div>
        <div class="blog-sidebar-wrapper">Blog Sidebar Wrapper Content</div>
        <div class="related-posts">Related Posts Content</div>
    </body>
    </html>
    """
    # Whole elements the cleaner is expected to drop.
    tags_to_exclude = [
        "nav",
        "aside",
        "form",
        "header",
        "noscript",
        "svg",
        "canvas",
        "footer",
        "script",
        "style",
    ]
    # Element ids the cleaner is expected to drop.
    ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
    # CSS classes the cleaner is expected to drop.
    classes_to_exclude = [
        "elementor-location-header",
        "navbar-header",
        "nav",
        "header-sidebar-wrapper",
        "blog-sidebar-wrapper",
        "related-posts",
    ]
    content = web_page_loader._get_clean_content(mock_html, "https://example.com/page")
    # None of the excluded markers may survive into the cleaned text.
    for tag in tags_to_exclude:
        assert tag not in content
    for id in ids_to_exclude:
        assert id not in content
    for class_name in classes_to_exclude:
        assert class_name not in content
    # But the page must not be cleaned down to nothing.
    assert len(content) > 0
def test_fetch_reference_links_success(web_page_loader):
    """Only absolute http(s) links are collected; relative links are skipped."""
    # Mock a successful response
    response = Mock(spec=requests.Response)
    response.status_code = 200
    response.content = b"""
    <html>
        <body>
            <a href="http://example.com">Example</a>
            <a href="https://another-example.com">Another Example</a>
            <a href="/relative-link">Relative Link</a>
        </body>
    </html>
    """
    expected_links = ["http://example.com", "https://another-example.com"]
    result = web_page_loader.fetch_reference_links(response)
    assert result == expected_links
def test_fetch_reference_links_failure(web_page_loader):
    """Non-200 responses yield an empty link list."""
    response = Mock(spec=requests.Response)
    response.status_code = 404
    response.content = b""
    assert web_page_loader.fetch_reference_links(response) == []
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_json.py | embedchain/tests/loaders/test_json.py | import hashlib
import pytest
from embedchain.loaders.json import JSONLoader
def test_load_data(mocker):
    """The (mocked) loader output exposes a doc_id and per-chunk data entries."""
    content = "temp.json"
    chunks = ["content1", "content2"]
    expected_doc_id = hashlib.sha256((content + ", ".join(chunks)).encode()).hexdigest()
    expected_data = [{"content": chunk, "meta_data": {"url": content}} for chunk in chunks]
    mocker.patch(
        "embedchain.loaders.json.JSONLoader.load_data",
        return_value={"doc_id": expected_doc_id, "data": expected_data},
    )
    result = JSONLoader().load_data(content)
    assert "doc_id" in result
    assert "data" in result
    assert result["data"] == expected_data
    assert result["doc_id"] == expected_doc_id
def test_load_data_url(mocker):
    """Loading from a URL: JSONReader rows become documents keyed by the URL."""
    url = "https://example.com/posts.json"
    # Force the URL branch (not a local file) and stub the reader's output.
    mocker.patch("os.path.isfile", return_value=False)
    mocker.patch(
        "embedchain.loaders.json.JSONReader.load_data",
        return_value=[{"text": "content1"}, {"text": "content2"}],
    )
    fake_response = mocker.Mock()
    fake_response.status_code = 200
    fake_response.json.return_value = {"document1": "content1", "document2": "content2"}
    mocker.patch("requests.get", return_value=fake_response)
    result = JSONLoader.load_data(url)
    assert "doc_id" in result
    assert "data" in result
    assert result["data"] == [
        {"content": "content1", "meta_data": {"url": url}},
        {"content": "content2", "meta_data": {"url": url}},
    ]
    assert result["doc_id"] == hashlib.sha256((url + ", ".join(["content1", "content2"])).encode()).hexdigest()
def test_load_data_invalid_string_content(mocker):
    """A string that is neither a file, a URL, nor valid JSON raises ValueError."""
    mocker.patch("os.path.isfile", return_value=False)
    mocker.patch("requests.get")
    with pytest.raises(ValueError, match="Invalid content to load json data from"):
        JSONLoader.load_data("123: 345}")
def test_load_data_invalid_url(mocker):
    """A URL answering with a non-200 status raises ValueError naming the URL."""
    url = "http://invalid-url.com/"
    mocker.patch("os.path.isfile", return_value=False)
    not_found = mocker.Mock()
    not_found.status_code = 404
    mocker.patch("requests.get", return_value=not_found)
    with pytest.raises(ValueError, match=f"Invalid content to load json data from: {url}"):
        JSONLoader.load_data(url)
def test_load_data_from_json_string(mocker):
    """An inline JSON string is keyed by the SHA-256 hexdigest of the string itself."""
    content = '{"foo": "bar"}'
    content_key = hashlib.sha256(content.encode("utf-8")).hexdigest()
    mocker.patch("os.path.isfile", return_value=False)
    mocker.patch(
        "embedchain.loaders.json.JSONReader.load_data",
        return_value=[{"text": "content1"}, {"text": "content2"}],
    )
    result = JSONLoader.load_data(content)
    assert "doc_id" in result
    assert "data" in result
    assert result["data"] == [
        {"content": "content1", "meta_data": {"url": content_key}},
        {"content": "content2", "meta_data": {"url": content_key}},
    ]
    assert result["doc_id"] == hashlib.sha256((content_key + ", ".join(["content1", "content2"])).encode()).hexdigest()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_notion.py | embedchain/tests/loaders/test_notion.py | import hashlib
import os
from unittest.mock import Mock, patch
import pytest
from embedchain.loaders.notion import NotionLoader
@pytest.fixture
def notion_loader():
    """Yield a NotionLoader while a fake NOTION_INTEGRATION_TOKEN is in the environment."""
    fake_env = {"NOTION_INTEGRATION_TOKEN": "test_notion_token"}
    with patch.dict(os.environ, fake_env):
        yield NotionLoader()
def test_load_data(notion_loader):
    """Notion page text is returned with a doc_id hashing text + source URL."""
    source = "https://www.notion.so/Test-Page-1234567890abcdef1234567890abcdef"
    page_text = "This is a test page."
    fake_page = Mock()
    fake_page.text = page_text
    with patch("embedchain.loaders.notion.NotionPageLoader") as reader_cls:
        reader_cls.return_value.load_data.return_value = [fake_page]
        result = notion_loader.load_data(source)
    assert result["doc_id"] == hashlib.sha256((page_text + source).encode()).hexdigest()
    assert result["data"] == [
        {
            "content": page_text,
            # The loader rewrites the 32-char page id into a dashed "notion-..." id.
            "meta_data": {"url": "notion-12345678-90ab-cdef-1234-567890abcdef"},
        }
    ]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_mdx.py | embedchain/tests/loaders/test_mdx.py | import hashlib
from unittest.mock import mock_open, patch
import pytest
from embedchain.loaders.mdx import MdxLoader
@pytest.fixture
def mdx_loader():
    # Fresh MdxLoader per test; construction takes no configuration.
    return MdxLoader()
def test_load_data(mdx_loader):
    """The file at `url` becomes one document; doc_id hashes content + url."""
    fake_text = "Sample MDX Content"
    path = "mock_file.mdx"
    # Stub the filesystem read so no real .mdx file is needed.
    with patch("builtins.open", mock_open(read_data=fake_text)):
        result = mdx_loader.load_data(path)
    assert "doc_id" in result
    assert "data" in result
    first = result["data"][0]
    assert first["content"] == fake_text
    assert first["meta_data"]["url"] == path
    assert result["doc_id"] == hashlib.sha256((fake_text + path).encode()).hexdigest()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_audio.py | embedchain/tests/loaders/test_audio.py | import hashlib
import os
import sys
from unittest.mock import mock_open, patch
import pytest
if sys.version_info > (3, 10): # as `match` statement was introduced in python 3.10
from deepgram import PrerecordedOptions
from embedchain.loaders.audio import AudioLoader
@pytest.fixture
def setup_audio_loader(mocker):
    """Yield an AudioLoader wired to a mocked Deepgram client.

    Patches ``deepgram.DeepgramClient``, injects the mock directly as
    ``loader.client``, and supplies a dummy DEEPGRAM_API_KEY so the loader's
    constructor succeeds. Yields ``(loader, mock_client)``; the env var is
    removed again on teardown.
    """
    # Fix: the mock was previously named `mock_dropbox` — a copy/paste
    # leftover from another loader's tests; this is a Deepgram client.
    mock_deepgram_cls = mocker.patch("deepgram.DeepgramClient")
    mock_client = mocker.MagicMock()
    mock_deepgram_cls.return_value = mock_client
    os.environ["DEEPGRAM_API_KEY"] = "test_key"
    loader = AudioLoader()
    loader.client = mock_client
    yield loader, mock_client
    if "DEEPGRAM_API_KEY" in os.environ:
        del os.environ["DEEPGRAM_API_KEY"]
@pytest.mark.skipif(
    sys.version_info < (3, 10), reason="Test skipped for Python 3.9 or lower"
)  # as `match` statement was introduced in python 3.10
def test_initialization(setup_audio_loader):
    """The fixture must hand back a constructed AudioLoader."""
    loader, _client = setup_audio_loader
    assert loader is not None
@pytest.mark.skipif(
    sys.version_info < (3, 10), reason="Test skipped for Python 3.9 or lower"
)  # as `match` statement was introduced in python 3.10
def test_load_data_from_url(setup_audio_loader):
    """URL inputs are transcribed via Deepgram's transcribe_url with nova-2 options."""
    loader, mock_client = setup_audio_loader
    url = "https://example.com/audio.mp3"
    transcript = "This is a test audio transcript."
    mock_client.listen.prerecorded.v.return_value.transcribe_url.return_value = {
        "results": {"channels": [{"alternatives": [{"transcript": transcript}]}]}
    }
    result = loader.load_data(url)
    assert result == {
        "doc_id": hashlib.sha256((transcript + url).encode()).hexdigest(),
        "data": [{"content": transcript, "meta_data": {"url": url}}],
    }
    # The loader must request API version "1" and pass the nova-2 options.
    mock_client.listen.prerecorded.v.assert_called_once_with("1")
    mock_client.listen.prerecorded.v.return_value.transcribe_url.assert_called_once_with(
        {"url": url}, PrerecordedOptions(model="nova-2", smart_format=True)
    )
@pytest.mark.skipif(
    sys.version_info < (3, 10), reason="Test skipped for Python 3.9 or lower"
)  # as `match` statement was introduced in python 3.10
def test_load_data_from_file(setup_audio_loader):
    """Local paths are opened and sent through Deepgram's transcribe_file."""
    loader, mock_client = setup_audio_loader
    path = "local_audio.mp3"
    transcript = "This is a test audio transcript."
    mock_client.listen.prerecorded.v.return_value.transcribe_file.return_value = {
        "results": {"channels": [{"alternatives": [{"transcript": transcript}]}]}
    }
    # Stub out the on-disk read so no real audio file is needed.
    with patch("builtins.open", mock_open(read_data=b"some data")) as mock_file:
        result = loader.load_data(path)
        assert result == {
            "doc_id": hashlib.sha256((transcript + path).encode()).hexdigest(),
            "data": [{"content": transcript, "meta_data": {"url": path}}],
        }
        mock_client.listen.prerecorded.v.assert_called_once_with("1")
        mock_client.listen.prerecorded.v.return_value.transcribe_file.assert_called_once_with(
            {"buffer": mock_file.return_value}, PrerecordedOptions(model="nova-2", smart_format=True)
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_excel_file.py | embedchain/tests/loaders/test_excel_file.py | import hashlib
from unittest.mock import patch
import pytest
from embedchain.loaders.excel_file import ExcelFileLoader
@pytest.fixture
def excel_file_loader():
    # Fresh ExcelFileLoader per test; construction takes no configuration.
    return ExcelFileLoader()
def test_load_data(excel_file_loader):
    # NOTE(review): this patches `load_data` on the object under test, so it
    # only round-trips the stubbed return value — no real Excel parsing is
    # exercised.
    path = "mock_excel_file.xlsx"
    text = "Sample Excel Content"
    doc_id = hashlib.sha256((text + path).encode()).hexdigest()
    stubbed = {
        "doc_id": doc_id,
        "data": [{"content": text, "meta_data": {"url": path}}],
    }
    with patch.object(excel_file_loader, "load_data", return_value=stubbed):
        result = excel_file_loader.load_data(path)
        assert result["data"][0]["content"] == text
        assert result["data"][0]["meta_data"]["url"] == path
        assert result["doc_id"] == doc_id
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_local_text.py | embedchain/tests/loaders/test_local_text.py | import hashlib
import pytest
from embedchain.loaders.local_text import LocalTextLoader
@pytest.fixture
def text_loader():
    # A plain LocalTextLoader; it is stateless, so one per test suffices.
    return LocalTextLoader()
def test_load_data(text_loader):
    """Raw text becomes a single document whose url is the literal 'local'."""
    sample = "This is a sample text content."
    result = text_loader.load_data(sample)
    assert "doc_id" in result
    assert "data" in result
    source = "local"
    first = result["data"][0]
    assert first["content"] == sample
    assert first["meta_data"]["url"] == source
    assert result["doc_id"] == hashlib.sha256((sample + source).encode()).hexdigest()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.