repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_github.py | embedchain/tests/loaders/test_github.py | import pytest
from embedchain.loaders.github import GithubLoader
@pytest.fixture
def mock_github_loader_config():
    """Minimal GithubLoader config carrying only the personal access token."""
    return dict(token="your_mock_token")
@pytest.fixture
def mock_github_loader(mocker, mock_github_loader_config):
    """GithubLoader built against a patched github.Github client class."""
    patched_github = mocker.patch("github.Github")
    _ = patched_github.return_value
    return GithubLoader(config=mock_github_loader_config)
def test_github_loader_init(mocker, mock_github_loader_config):
    """Constructing the loader hands the configured token to github.Github."""
    patched_github = mocker.patch("github.Github")
    GithubLoader(config=mock_github_loader_config)
    patched_github.assert_called_once_with("your_mock_token")
def test_github_loader_init_empty_config():
    """Omitting the config entirely must raise about the missing token.

    The unused ``mocker`` fixture parameter from the original was dropped —
    nothing in this test patches anything.
    """
    with pytest.raises(ValueError, match="requires a personal access token"):
        GithubLoader()
def test_github_loader_init_missing_token():
    """A config lacking the token key must raise about the missing token."""
    tokenless_config = {}
    with pytest.raises(ValueError, match="requires a personal access token"):
        GithubLoader(config=tokenless_config)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_slack.py | embedchain/tests/loaders/test_slack.py | import pytest
from embedchain.loaders.slack import SlackLoader
@pytest.fixture
def slack_loader(mocker, monkeypatch):
    """SlackLoader with the SDK client, SSL context and cert lookup mocked out."""
    # The three patches are independent of one another.
    monkeypatch.setenv("SLACK_USER_TOKEN", "slack_user_token")
    mocker.patch("slack_sdk.WebClient")
    mocker.patch("ssl.create_default_context")
    mocker.patch("certifi.where")
    return SlackLoader()
def test_slack_loader_initialization(slack_loader):
    """A fresh loader exposes a client and the default Slack API base URL."""
    cfg = slack_loader.config
    assert slack_loader.client is not None
    assert cfg == {"base_url": "https://www.slack.com/api/"}
def test_slack_loader_setup_loader(slack_loader):
    """_setup_loader accepts a custom base URL and still yields a client."""
    custom_config = {"base_url": "https://custom.slack.api/"}
    slack_loader._setup_loader(custom_config)
    assert slack_loader.client is not None
def test_slack_loader_check_query(slack_loader):
    """Strings pass _check_query; non-string queries are rejected."""
    slack_loader._check_query("test_query")
    with pytest.raises(ValueError):
        slack_loader._check_query(123)
def test_slack_loader_load_data(slack_loader, mocker):
    """load_data returns a dict carrying doc_id and data for a valid query."""
    mocker.patch.object(slack_loader.client, "search_messages", return_value={"messages": {}})
    result = slack_loader.load_data("in:random")
    assert "doc_id" in result
    assert "data" in result
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_csv.py | embedchain/tests/loaders/test_csv.py | import csv
import os
import pathlib
import tempfile
from unittest.mock import MagicMock, patch
import pytest
from embedchain.loaders.csv import CsvLoader
@pytest.mark.parametrize("delimiter", [",", "\t", ";", "|"])
def test_load_data(delimiter):
    """
    Test csv loader

    Tests that file is loaded, metadata is correct and content is correct
    """
    rows = [
        ["Name", "Age", "Occupation"],
        ["Alice", "28", "Engineer"],
        ["Bob", "35", "Doctor"],
        ["Charlie", "22", "Student"],
    ]
    # Creating temporary CSV file
    with tempfile.NamedTemporaryFile(mode="w+", newline="", delete=False) as tmpfile:
        csv.writer(tmpfile, delimiter=delimiter).writerows(rows)
        tmpfile.seek(0)
        filename = tmpfile.name

        # Loading CSV using CsvLoader
        result = CsvLoader().load_data(filename)
        data = result["data"]

        # Assertions: one entry per data row, 1-indexed row numbers.
        expected_contents = [
            "Name: Alice, Age: 28, Occupation: Engineer",
            "Name: Bob, Age: 35, Occupation: Doctor",
            "Name: Charlie, Age: 22, Occupation: Student",
        ]
        assert len(data) == 3
        for idx, content in enumerate(expected_contents):
            assert data[idx]["content"] == content
            assert data[idx]["meta_data"]["url"] == filename
            assert data[idx]["meta_data"]["row"] == idx + 1

    # Cleaning up the temporary file
    os.unlink(filename)
@pytest.mark.parametrize("delimiter", [",", "\t", ";", "|"])
def test_load_data_with_file_uri(delimiter):
    """
    Test csv loader with file URI

    Tests that file is loaded, metadata is correct and content is correct
    """
    rows = [
        ["Name", "Age", "Occupation"],
        ["Alice", "28", "Engineer"],
        ["Bob", "35", "Doctor"],
        ["Charlie", "22", "Student"],
    ]
    # Creating temporary CSV file
    with tempfile.NamedTemporaryFile(mode="w+", newline="", delete=False) as tmpfile:
        csv.writer(tmpfile, delimiter=delimiter).writerows(rows)
        tmpfile.seek(0)
        filename = pathlib.Path(tmpfile.name).as_uri()  # Convert path to file URI

        # Loading CSV using CsvLoader
        result = CsvLoader().load_data(filename)
        data = result["data"]

        # Assertions: metadata url records the URI form, rows are 1-indexed.
        expected_contents = [
            "Name: Alice, Age: 28, Occupation: Engineer",
            "Name: Bob, Age: 35, Occupation: Doctor",
            "Name: Charlie, Age: 22, Occupation: Student",
        ]
        assert len(data) == 3
        for idx, content in enumerate(expected_contents):
            assert data[idx]["content"] == content
            assert data[idx]["meta_data"]["url"] == filename
            assert data[idx]["meta_data"]["row"] == idx + 1

    # Cleaning up the temporary file
    os.unlink(tmpfile.name)
@pytest.mark.parametrize("content", ["ftp://example.com", "sftp://example.com", "mailto://example.com"])
def test_get_file_content(content):
    """Unsupported URL schemes must be rejected by _get_file_content."""
    # Construct outside the raises block so only _get_file_content may raise;
    # the original would also pass if CsvLoader() itself raised ValueError.
    loader = CsvLoader()
    with pytest.raises(ValueError):
        loader._get_file_content(content)
@pytest.mark.parametrize("content", ["http://example.com", "https://example.com"])
def test_get_file_content_http(content):
    """
    Test _get_file_content method of CsvLoader for http and https URLs
    """
    mock_response = MagicMock()
    mock_response.text = "Name,Age,Occupation\nAlice,28,Engineer\nBob,35,Doctor\nCharlie,22,Student"
    with patch("requests.get", return_value=mock_response) as mock_get:
        loader = CsvLoader()
        file_content = loader._get_file_content(content)
        mock_get.assert_called_once_with(content)
        mock_response.raise_for_status.assert_called_once()
        assert file_content.read() == mock_response.text
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_youtube_video.py | embedchain/tests/loaders/test_youtube_video.py | import hashlib
from unittest.mock import MagicMock, Mock, patch
import pytest
from embedchain.loaders.youtube_video import YoutubeVideoLoader
@pytest.fixture
def youtube_video_loader():
    """A plain YoutubeVideoLoader instance."""
    loader = YoutubeVideoLoader()
    return loader
def test_load_data(youtube_video_loader):
    """load_data wraps the langchain YoutubeLoader document with transcript info."""
    video_url = "https://www.youtube.com/watch?v=VIDEO_ID"
    mock_loader = Mock()
    mock_page_content = "This is a YouTube video content."
    mock_loader.load.return_value = [
        MagicMock(
            page_content=mock_page_content,
            metadata={"url": video_url, "title": "Test Video"},
        )
    ]
    mock_transcript = [{"text": "sample text", "start": 0.0, "duration": 5.0}]
    # Patch both the langchain loader factory and the transcript API inside the
    # embedchain module so load_data never touches the network.
    with patch("embedchain.loaders.youtube_video.YoutubeLoader.from_youtube_url", return_value=mock_loader), patch(
        "embedchain.loaders.youtube_video.YouTubeTranscriptApi.get_transcript", return_value=mock_transcript
    ):
        result = youtube_video_loader.load_data(video_url)
    # doc_id is sha256(page content + source url).
    expected_doc_id = hashlib.sha256((mock_page_content + video_url).encode()).hexdigest()
    assert result["doc_id"] == expected_doc_id
    # NOTE(review): the transcript API is mocked to return data, yet the expected
    # metadata records transcript "Unavailable" — presumably the loader fails to
    # serialize the mocked transcript object; confirm against
    # YoutubeVideoLoader.load_data before relying on this.
    expected_data = [
        {
            "content": "This is a YouTube video content.",
            "meta_data": {"url": video_url, "title": "Test Video", "transcript": "Unavailable"},
        }
    ]
    assert result["data"] == expected_data
def test_load_data_with_empty_doc(youtube_video_loader):
    """An empty document list from the underlying loader is an error."""
    video_url = "https://www.youtube.com/watch?v=VIDEO_ID"
    empty_loader = Mock()
    empty_loader.load.return_value = []
    patch_target = "embedchain.loaders.youtube_video.YoutubeLoader.from_youtube_url"
    with patch(patch_target, return_value=empty_loader):
        with pytest.raises(ValueError):
            youtube_video_loader.load_data(video_url)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_xml.py | embedchain/tests/loaders/test_xml.py | import tempfile
import pytest
from embedchain.loaders.xml import XmlLoader
# Taken from https://github.com/langchain-ai/langchain/blob/master/libs/langchain/tests/integration_tests/examples/factbook.xml
SAMPLE_XML = """<?xml version="1.0" encoding="UTF-8"?>
<factbook>
<country>
<name>United States</name>
<capital>Washington, DC</capital>
<leader>Joe Biden</leader>
<sport>Baseball</sport>
</country>
<country>
<name>Canada</name>
<capital>Ottawa</capital>
<leader>Justin Trudeau</leader>
<sport>Hockey</sport>
</country>
<country>
<name>France</name>
<capital>Paris</capital>
<leader>Emmanuel Macron</leader>
<sport>Soccer</sport>
</country>
<country>
<name>Trinidad & Tobado</name>
<capital>Port of Spain</capital>
<leader>Keith Rowley</leader>
<sport>Track & Field</sport>
</country>
</factbook>"""
@pytest.mark.parametrize("xml", [SAMPLE_XML])
def test_load_data(xml: str):
    """
    Test XML loader

    Tests that XML file is loaded, metadata is correct and content is correct
    """
    # Creating temporary XML file
    with tempfile.NamedTemporaryFile(mode="w+") as tmpfile:
        tmpfile.write(xml)
        tmpfile.seek(0)
        filename = tmpfile.name

        # Loading XML using XmlLoader
        result = XmlLoader().load_data(filename)
        data = result["data"]

        # Assertions
        assert len(data) == 1
        content = data[0]["content"]
        for snippet in (
            "United States Washington, DC Joe Biden",
            "Canada Ottawa Justin Trudeau",
            "France Paris Emmanuel Macron",
            "Trinidad & Tobado Port of Spain Keith Rowley",
        ):
            assert snippet in content
        assert data[0]["meta_data"]["url"] == filename
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_discourse.py | embedchain/tests/loaders/test_discourse.py | import pytest
import requests
from embedchain.loaders.discourse import DiscourseLoader
@pytest.fixture
def discourse_loader_config():
    """Config pointing the loader at a dummy Discourse domain."""
    return dict(domain="https://example.com/")
@pytest.fixture
def discourse_loader(discourse_loader_config):
    """DiscourseLoader built from the dummy config."""
    loader = DiscourseLoader(config=discourse_loader_config)
    return loader
def test_discourse_loader_init_with_valid_config():
    """The configured domain is stored on the loader."""
    loader = DiscourseLoader(config={"domain": "https://example.com/"})
    assert loader.domain == "https://example.com/"
def test_discourse_loader_init_with_missing_config():
    """Instantiating without any config raises a descriptive error."""
    with pytest.raises(ValueError, match="DiscourseLoader requires a config"):
        DiscourseLoader()
def test_discourse_loader_init_with_missing_domain():
    """A config lacking the domain key raises a descriptive error."""
    domainless_config = {"another_key": "value"}
    with pytest.raises(ValueError, match="DiscourseLoader requires a domain"):
        DiscourseLoader(config=domainless_config)
def test_discourse_loader_check_query_with_valid_query(discourse_loader):
    """A non-empty string query passes validation without raising."""
    discourse_loader._check_query("sample query")
def test_discourse_loader_check_query_with_empty_query(discourse_loader):
    """The empty string is rejected as a query."""
    empty_query = ""
    with pytest.raises(ValueError, match="DiscourseLoader requires a query"):
        discourse_loader._check_query(empty_query)
def test_discourse_loader_check_query_with_invalid_query_type(discourse_loader):
    """Non-string queries are rejected."""
    not_a_string = 123
    with pytest.raises(ValueError, match="DiscourseLoader requires a query"):
        discourse_loader._check_query(not_a_string)
def test_discourse_loader_load_post_with_valid_post_id(discourse_loader, monkeypatch):
    """_load_post returns the raw post body plus a meta_data entry."""

    class StubResponse:
        def json(self):
            return {"raw": "Sample post content"}

        def raise_for_status(self):
            pass

    monkeypatch.setattr(requests, "get", lambda *args, **kwargs: StubResponse())

    post_data = discourse_loader._load_post(123)

    assert post_data["content"] == "Sample post content"
    assert "meta_data" in post_data
def test_discourse_loader_load_data_with_valid_query(discourse_loader, monkeypatch):
    """load_data fans out over every post id returned by the search endpoint."""

    class StubResponse:
        def json(self):
            return {"grouped_search_result": {"post_ids": [123, 456, 789]}}

        def raise_for_status(self):
            pass

    monkeypatch.setattr(requests, "get", lambda *args, **kwargs: StubResponse())

    def stub_load_post(*args, **kwargs):
        # Return a fresh dict per call so the loader cannot alias entries.
        return {
            "content": "Sample post content",
            "meta_data": {
                "url": "https://example.com/posts/123.json",
                "created_at": "2021-01-01",
                "username": "test_user",
                "topic_slug": "test_topic",
                "score": 10,
            },
        }

    monkeypatch.setattr(discourse_loader, "_load_post", stub_load_post)

    data = discourse_loader.load_data("sample query")

    assert len(data["data"]) == 3
    first = data["data"][0]
    assert first["content"] == "Sample post content"
    assert first["meta_data"]["url"] == "https://example.com/posts/123.json"
    assert first["meta_data"]["created_at"] == "2021-01-01"
    assert first["meta_data"]["username"] == "test_user"
    assert first["meta_data"]["topic_slug"] == "test_topic"
    assert first["meta_data"]["score"] == 10
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_docs_site_loader.py | embedchain/tests/loaders/test_docs_site_loader.py | import pytest
import responses
from bs4 import BeautifulSoup
@pytest.mark.parametrize(
    "ignored_tag",
    [
        "<nav>This is a navigation bar.</nav>",
        "<aside>This is an aside.</aside>",
        "<form>This is a form.</form>",
        "<header>This is a header.</header>",
        "<noscript>This is a noscript.</noscript>",
        "<svg>This is an SVG.</svg>",
        "<canvas>This is a canvas.</canvas>",
        "<footer>This is a footer.</footer>",
        "<script>This is a script.</script>",
        "<style>This is a style.</style>",
    ],
    ids=["nav", "aside", "form", "header", "noscript", "svg", "canvas", "footer", "script", "style"],
)
@pytest.mark.parametrize(
    "selectee",
    [
        # Each selectee matches one of the CSS selectors the loader recognises;
        # {ignored_tag} is substituted below to prove ignored tags get stripped.
        """
        <article class="bd-article">
            <h2>Article Title</h2>
            <p>Article content goes here.</p>
            {ignored_tag}
        </article>
        """,
        """
        <article role="main">
            <h2>Main Article Title</h2>
            <p>Main article content goes here.</p>
            {ignored_tag}
        </article>
        """,
        """
        <div class="md-content">
            <h2>Markdown Content</h2>
            <p>Markdown content goes here.</p>
            {ignored_tag}
        </div>
        """,
        """
        <div role="main">
            <h2>Main Content</h2>
            <p>Main content goes here.</p>
            {ignored_tag}
        </div>
        """,
        """
        <div class="container">
            <h2>Container</h2>
            <p>Container content goes here.</p>
            {ignored_tag}
        </div>
        """,
        """
        <div class="section">
            <h2>Section</h2>
            <p>Section content goes here.</p>
            {ignored_tag}
        </div>
        """,
        """
        <article>
            <h2>Generic Article</h2>
            <p>Generic article content goes here.</p>
            {ignored_tag}
        </article>
        """,
        """
        <main>
            <h2>Main Content</h2>
            <p>Main content goes here.</p>
            {ignored_tag}
        </main>
        """,
    ],
    ids=[
        "article.bd-article",
        'article[role="main"]',
        "div.md-content",
        'div[role="main"]',
        "div.container",
        "div.section",
        "article",
        "main",
    ],
)
def test_load_data_gets_by_selectors_and_ignored_tags(selectee, ignored_tag, loader, mocked_responses, mocker):
    """For every recognised selector the loaded content keeps the h2/p text and drops the ignored tag."""
    # Child page: the selectee with the ignored tag baked in.
    child_url = "https://docs.embedchain.ai/quickstart"
    selectee = selectee.format(ignored_tag=ignored_tag)
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            {selectee}
        </body>
    </html>
    """
    html_body = html_body.format(selectee=selectee)
    mocked_responses.get(child_url, body=html_body, status=200, content_type="text/html")
    # Index page linking to the child page.
    url = "https://docs.embedchain.ai/"
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            <li><a href="/quickstart">Quickstart</a></li>
        </body>
    </html>
    """
    mocked_responses.get(url, body=html_body, status=200, content_type="text/html")
    # Pin the doc id by mocking sha256 inside the loader module.
    mock_sha256 = mocker.patch("embedchain.loaders.docs_site_loader.hashlib.sha256")
    doc_id = "mocked_hash"
    mock_sha256.return_value.hexdigest.return_value = doc_id
    result = loader.load_data(url)
    # Expected content: the selectee's h2 text and p text, space-joined.
    selector_soup = BeautifulSoup(selectee, "html.parser")
    expected_content = " ".join((selector_soup.select_one("h2").get_text(), selector_soup.select_one("p").get_text()))
    assert result["doc_id"] == doc_id
    assert result["data"] == [
        {
            "content": expected_content,
            "meta_data": {"url": "https://docs.embedchain.ai/quickstart"},
        }
    ]
def test_load_data_gets_child_links_recursively(loader, mocked_responses, mocker):
    """Child pages discovered from the index are themselves fetched and loaded."""
    child_url = "https://docs.embedchain.ai/quickstart"
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            <li><a href="/">..</a></li>
            <li><a href="/quickstart">.</a></li>
        </body>
    </html>
    """
    mocked_responses.get(child_url, body=html_body, status=200, content_type="text/html")
    child_url = "https://docs.embedchain.ai/introduction"
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            <li><a href="/">..</a></li>
            <li><a href="/introduction">.</a></li>
        </body>
    </html>
    """
    mocked_responses.get(child_url, body=html_body, status=200, content_type="text/html")
    url = "https://docs.embedchain.ai/"
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            <li><a href="/quickstart">Quickstart</a></li>
            <li><a href="/introduction">Introduction</a></li>
        </body>
    </html>
    """
    mocked_responses.get(url, body=html_body, status=200, content_type="text/html")
    mock_sha256 = mocker.patch("embedchain.loaders.docs_site_loader.hashlib.sha256")
    doc_id = "mocked_hash"
    mock_sha256.return_value.hexdigest.return_value = doc_id
    result = loader.load_data(url)
    assert result["doc_id"] == doc_id
    expected_data = [
        {"content": "..\n.", "meta_data": {"url": "https://docs.embedchain.ai/quickstart"}},
        {"content": "..\n.", "meta_data": {"url": "https://docs.embedchain.ai/introduction"}},
    ]
    # The original only checked that every returned item was expected, which
    # passes vacuously when no pages load at all; also require both pages present.
    assert len(result["data"]) == len(expected_data)
    assert all(item in expected_data for item in result["data"])
def test_load_data_fails_to_fetch_website(loader, mocked_responses, mocker):
    """A child page that 404s contributes no data entries."""
    child_url = "https://docs.embedchain.ai/introduction"
    mocked_responses.get(child_url, status=404)
    url = "https://docs.embedchain.ai/"
    html_body = """
    <!DOCTYPE html>
    <html lang="en">
        <body>
            <li><a href="/introduction">Introduction</a></li>
        </body>
    </html>
    """
    mocked_responses.get(url, body=html_body, status=200, content_type="text/html")
    mock_sha256 = mocker.patch("embedchain.loaders.docs_site_loader.hashlib.sha256")
    doc_id = "mocked_hash"
    mock_sha256.return_value.hexdigest.return_value = doc_id
    result = loader.load_data(url)
    # Compare by equality, not identity — the original `is doc_id` only passed
    # because the mock returned this exact string object.
    assert result["doc_id"] == doc_id
    assert result["data"] == []
@pytest.fixture
def loader():
    """Fresh DocsSiteLoader; imported lazily so collection stays cheap."""
    from embedchain.loaders.docs_site_loader import DocsSiteLoader

    return DocsSiteLoader()
@pytest.fixture
def mocked_responses():
    """Yield an active RequestsMock so tests can register fake HTTP responses."""
    with responses.RequestsMock() as request_mock:
        yield request_mock
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_pdf_file.py | embedchain/tests/loaders/test_pdf_file.py | import pytest
from langchain.schema import Document
def test_load_data(loader, mocker):
    """PdfFileLoader returns one entry per PDF page with the url merged into metadata."""
    mocked_pypdfloader = mocker.patch("embedchain.loaders.pdf_file.PyPDFLoader")
    mocked_pypdfloader.return_value.load_and_split.return_value = [
        Document(page_content="Page 0 Content", metadata={"source": "example.pdf", "page": 0}),
        Document(page_content="Page 1 Content", metadata={"source": "example.pdf", "page": 1}),
    ]
    # NOTE(review): this patches hashlib through the docs_site_loader module
    # rather than pdf_file. It presumably still works because both modules share
    # the singleton hashlib module object, but the target looks copy-pasted —
    # confirm and point it at embedchain.loaders.pdf_file.
    mock_sha256 = mocker.patch("embedchain.loaders.docs_site_loader.hashlib.sha256")
    doc_id = "mocked_hash"
    mock_sha256.return_value.hexdigest.return_value = doc_id
    result = loader.load_data("dummy_url")
    # `is` passes here only because the mocked hexdigest returns this exact object.
    assert result["doc_id"] is doc_id
    assert result["data"] == [
        {"content": "Page 0 Content", "meta_data": {"source": "example.pdf", "page": 0, "url": "dummy_url"}},
        {"content": "Page 1 Content", "meta_data": {"source": "example.pdf", "page": 1, "url": "dummy_url"}},
    ]
def test_load_data_fails_to_find_data(loader, mocker):
    """An empty page list from PyPDFLoader is treated as an error."""
    pdf_loader_cls = mocker.patch("embedchain.loaders.pdf_file.PyPDFLoader")
    pdf_loader_cls.return_value.load_and_split.return_value = []
    with pytest.raises(ValueError):
        loader.load_data("dummy_url")
@pytest.fixture
def loader():
    """Fresh PdfFileLoader; imported lazily so collection stays cheap."""
    from embedchain.loaders.pdf_file import PdfFileLoader

    return PdfFileLoader()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_docs_site.py | embedchain/tests/loaders/test_docs_site.py | import hashlib
from unittest.mock import Mock, patch
import pytest
from requests import Response
from embedchain.loaders.docs_site_loader import DocsSiteLoader
@pytest.fixture
def mock_requests_get():
    """Patch requests.get for the duration of a test and yield the mock."""
    with patch("requests.get") as patched_get:
        yield patched_get
@pytest.fixture
def docs_site_loader():
    """A plain DocsSiteLoader instance."""
    loader = DocsSiteLoader()
    return loader
def test_get_child_links_recursive(mock_requests_get, docs_site_loader):
    """Relative anchors on a fetched page become absolute visited links."""
    page = Mock()
    page.status_code = 200
    page.text = """
    <html>
        <a href="/page1">Page 1</a>
        <a href="/page2">Page 2</a>
    </html>
    """
    mock_requests_get.return_value = page

    docs_site_loader._get_child_links_recursive("https://example.com")

    assert len(docs_site_loader.visited_links) == 2
    for link in ("https://example.com/page1", "https://example.com/page2"):
        assert link in docs_site_loader.visited_links
def test_get_child_links_recursive_status_not_200(mock_requests_get, docs_site_loader):
    """Non-200 responses contribute no visited links."""
    failed_page = Mock()
    failed_page.status_code = 404
    mock_requests_get.return_value = failed_page

    docs_site_loader._get_child_links_recursive("https://example.com")

    assert len(docs_site_loader.visited_links) == 0
def test_get_all_urls(mock_requests_get, docs_site_loader):
    """Relative and same-host absolute anchors are all collected."""
    page = Mock()
    page.status_code = 200
    page.text = """
    <html>
        <a href="/page1">Page 1</a>
        <a href="/page2">Page 2</a>
        <a href="https://example.com/external">External</a>
    </html>
    """
    mock_requests_get.return_value = page

    all_urls = docs_site_loader._get_all_urls("https://example.com")

    assert len(all_urls) == 3
    for link in (
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/external",
    ):
        assert link in all_urls
def test_load_data_from_url(mock_requests_get, docs_site_loader):
    """Only text inside the selected article is extracted; <nav> is ignored."""
    page = Mock()
    page.status_code = 200
    page.content = (
        """
    <html>
        <nav>
            <h1>Navigation</h1>
        </nav>
        <article class="bd-article">
            <p>Article Content</p>
        </article>
    </html>
    """
    ).encode()
    mock_requests_get.return_value = page

    data = docs_site_loader._load_data_from_url("https://example.com/page1")

    assert len(data) == 1
    assert data[0]["content"] == "Article Content"
    assert data[0]["meta_data"]["url"] == "https://example.com/page1"
def test_load_data_from_url_status_not_200(mock_requests_get, docs_site_loader):
    """A non-200 page yields no data."""
    mock_response = Mock()
    mock_response.status_code = 404
    mock_requests_get.return_value = mock_response
    data = docs_site_loader._load_data_from_url("https://example.com/page1")
    # One equality check suffices; the original's extra len(data) == 0 assertion
    # was fully implied by data == [].
    assert data == []
def test_load_data(mock_requests_get, docs_site_loader):
    """End-to-end load_data: doc_id hashes the visited links plus the root url."""
    # A real requests.Response (not a Mock) so .text decodes _content properly.
    mock_response = Response()
    mock_response.status_code = 200
    mock_response._content = """
    <html>
        <a href="/page1">Page 1</a>
        <a href="/page2">Page 2</a>
    """.encode()
    mock_requests_get.return_value = mock_response
    url = "https://example.com"
    data = docs_site_loader.load_data(url)
    # Computed AFTER load_data so visited_links is already populated — order matters.
    expected_doc_id = hashlib.sha256((" ".join(docs_site_loader.visited_links) + url).encode()).hexdigest()
    assert len(data["data"]) == 2
    assert data["doc_id"] == expected_doc_id
def test_if_response_status_not_200(mock_requests_get, docs_site_loader):
    """A failing root fetch yields no data but still a deterministic doc_id."""
    mock_response = Response()
    mock_response.status_code = 404
    mock_requests_get.return_value = mock_response
    url = "https://example.com"
    data = docs_site_loader.load_data(url)
    # visited_links stays empty on failure, so this hashes just the url.
    expected_doc_id = hashlib.sha256((" ".join(docs_site_loader.visited_links) + url).encode()).hexdigest()
    assert len(data["data"]) == 0
    assert data["doc_id"] == expected_doc_id
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_dropbox.py | embedchain/tests/loaders/test_dropbox.py | import os
from unittest.mock import MagicMock
import pytest
from dropbox.files import FileMetadata
from embedchain.loaders.dropbox import DropboxLoader
@pytest.fixture
def setup_dropbox_loader(mocker):
    """Yield a (DropboxLoader, mocked dropbox client) pair.

    Sets DROPBOX_ACCESS_TOKEN for the duration of the test and restores any
    pre-existing value afterwards — the original unconditionally deleted it,
    clobbering a token set in the developer's environment, and skipped cleanup
    entirely if the test body raised.
    """
    mock_dropbox = mocker.patch("dropbox.Dropbox")
    mock_dbx = mocker.MagicMock()
    mock_dropbox.return_value = mock_dbx

    previous_token = os.environ.get("DROPBOX_ACCESS_TOKEN")
    os.environ["DROPBOX_ACCESS_TOKEN"] = "test_token"
    try:
        yield DropboxLoader(), mock_dbx
    finally:
        if previous_token is None:
            os.environ.pop("DROPBOX_ACCESS_TOKEN", None)
        else:
            os.environ["DROPBOX_ACCESS_TOKEN"] = previous_token
def test_initialization(setup_dropbox_loader):
    """Test initialization of DropboxLoader."""
    loader = setup_dropbox_loader[0]
    assert loader is not None
def test_download_folder(setup_dropbox_loader, mocker):
    """Test downloading a folder."""
    loader, mock_dbx = setup_dropbox_loader
    mocker.patch("os.makedirs")
    mocker.patch("os.path.join", return_value="mock/path")
    file_entry = mocker.MagicMock(spec=FileMetadata)
    mock_dbx.files_list_folder.return_value.entries = [file_entry]
    assert loader._download_folder("path/to/folder", "local_root") is not None
def test_generate_dir_id_from_all_paths(setup_dropbox_loader, mocker):
    """Test directory ID generation."""
    loader, mock_dbx = setup_dropbox_loader
    # NOTE(review): `name=` in the MagicMock constructor names the mock itself;
    # it does not create a plain `.name` attribute equal to "file.txt". Confirm
    # whether _generate_dir_id_from_all_paths actually reads entry.name here.
    mock_file_metadata = mocker.MagicMock(spec=FileMetadata, name="file.txt")
    mock_dbx.files_list_folder.return_value.entries = [mock_file_metadata]
    dir_id = loader._generate_dir_id_from_all_paths("path/to/folder")
    assert dir_id is not None
    # 64 hex characters — consistent with a sha256 hexdigest.
    assert len(dir_id) == 64
def test_clean_directory(setup_dropbox_loader, mocker):
    """Test cleaning up a directory."""
    loader = setup_dropbox_loader[0]
    # Stub out all filesystem calls so no real files are touched.
    mocker.patch("os.listdir", return_value=["file1", "file2"])
    mocker.patch("os.remove")
    mocker.patch("os.rmdir")
    loader._clean_directory("path/to/folder")
def test_load_data(mocker, setup_dropbox_loader, tmp_path):
    """load_data downloads the folder then defers to DirectoryLoader for parsing."""
    loader = setup_dropbox_loader[0]
    mock_file_metadata = MagicMock(spec=FileMetadata, name="file.txt")
    mocker.patch.object(loader.dbx, "files_list_folder", return_value=MagicMock(entries=[mock_file_metadata]))
    mocker.patch.object(loader.dbx, "files_download_to_file")
    # Mock DirectoryLoader
    mock_data = {"data": "test_data"}
    mocker.patch("embedchain.loaders.directory_loader.DirectoryLoader.load_data", return_value=mock_data)
    # Real files on disk so any local-directory handling has something to walk.
    test_dir = tmp_path / "dropbox_test"
    test_dir.mkdir()
    test_file = test_dir / "file.txt"
    test_file.write_text("dummy content")
    mocker.patch.object(loader, "_generate_dir_id_from_all_paths", return_value=str(test_dir))
    result = loader.load_data("path/to/folder")
    # doc_id is loader-generated, so only the shape and payload are pinned.
    assert result == {"doc_id": mocker.ANY, "data": "test_data"}
    loader.dbx.files_list_folder.assert_called_once_with("path/to/folder")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_docx_file.py | embedchain/tests/loaders/test_docx_file.py | import hashlib
from unittest.mock import MagicMock, patch
import pytest
from embedchain.loaders.docx_file import DocxFileLoader
@pytest.fixture
def mock_docx2txt_loader():
    """Patch the langchain Docx2txtLoader used by DocxFileLoader."""
    with patch("embedchain.loaders.docx_file.Docx2txtLoader") as patched_loader:
        yield patched_loader
@pytest.fixture
def docx_file_loader():
    """A plain DocxFileLoader instance."""
    loader = DocxFileLoader()
    return loader
def test_load_data(mock_docx2txt_loader, docx_file_loader):
    """A docx file yields its text content, metadata, and a sha256 doc id."""
    mock_url = "mock_docx_file.docx"
    fake_document = MagicMock(page_content="Sample Docx Content", metadata={"url": "local"})
    inner_loader = MagicMock()
    inner_loader.load.return_value = [fake_document]
    mock_docx2txt_loader.return_value = inner_loader

    result = docx_file_loader.load_data(mock_url)

    assert "doc_id" in result
    assert "data" in result
    expected_content = "Sample Docx Content"
    first_entry = result["data"][0]
    assert first_entry["content"] == expected_content
    assert first_entry["meta_data"]["url"] == "local"
    expected_doc_id = hashlib.sha256((expected_content + mock_url).encode()).hexdigest()
    assert result["doc_id"] == expected_doc_id
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/tests/loaders/test_google_drive.py | embedchain/tests/loaders/test_google_drive.py | import pytest
from embedchain.loaders.google_drive import GoogleDriveLoader
@pytest.fixture
def google_drive_folder_loader():
    """A plain GoogleDriveLoader instance."""
    loader = GoogleDriveLoader()
    return loader
def test_load_data_invalid_drive_url(google_drive_folder_loader):
    """A non-drive URL is rejected with a descriptive error message."""
    bad_url = "https://example.com"
    expected_message = (
        "The url provided https://example.com does not match a google drive folder url. Example "
        "drive url: https://drive.google.com/drive/u/0/folders/xxxx"
    )
    with pytest.raises(ValueError, match=expected_message):
        google_drive_folder_loader.load_data(bad_url)
@pytest.mark.skip(reason="This test won't work unless google api credentials are properly setup.")
def test_load_data_incorrect_drive_url(google_drive_folder_loader):
    """A well-formed but nonexistent folder URL should fail the lookup."""
    bogus_folder_url = "https://drive.google.com/drive/u/0/folders/xxxx"
    with pytest.raises(
        FileNotFoundError, match="Unable to locate folder or files, check provided drive URL and try again"
    ):
        google_drive_folder_loader.load_data(bogus_folder_url)
@pytest.mark.skip(reason="This test won't work unless google api credentials are properly setup.")
def test_load_data(google_drive_folder_loader):
    """With real credentials, load_data returns doc_id plus content/meta entries."""
    result = google_drive_folder_loader.load_data("YOUR_VALID_URL")
    for key in ("doc_id", "data"):
        assert key in result
    first_entry = result["data"][0]
    assert "content" in first_entry
    assert "meta_data" in first_entry
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/api_server/api_server.py | embedchain/examples/api_server/api_server.py | import logging
from flask import Flask, jsonify, request
from embedchain import App
app = Flask(__name__)
logger = logging.getLogger(__name__)
@app.route("/add", methods=["POST"])
def add():
    """Ingest a document into a fresh embedchain App.

    Expects JSON with 'data_type' and 'url_or_text'.
    """
    payload = request.get_json()
    data_type = payload.get("data_type")
    url_or_text = payload.get("url_or_text")
    # Guard clause: both fields are required.
    if not (data_type and url_or_text):
        return jsonify({"error": "Invalid request. Please provide 'data_type' and 'url_or_text' in JSON format."}), 400
    try:
        App().add(url_or_text, data_type=data_type)
    except Exception:
        logger.exception(f"Failed to add {data_type=}: {url_or_text=}")
        return jsonify({"error": f"Failed to add {data_type}: {url_or_text}"}), 500
    return jsonify({"data": f"Added {data_type}: {url_or_text}"}), 200
@app.route("/query", methods=["POST"])
def query():
    """Answer a one-off question against a fresh embedchain App."""
    payload = request.get_json()
    question = payload.get("question")
    # Guard clause: a question is required.
    if not question:
        return jsonify({"error": "Invalid request. Please provide 'question' in JSON format."}), 400
    try:
        response = App().query(question)
    except Exception:
        logger.exception(f"Failed to query {question=}")
        return jsonify({"error": "An error occurred. Please try again!"}), 500
    return jsonify({"data": response}), 200
@app.route("/chat", methods=["POST"])
def chat():
    """Answer a question with conversational context via a fresh embedchain App."""
    payload = request.get_json()
    question = payload.get("question")
    # Guard clause: a question is required.
    if not question:
        return jsonify({"error": "Invalid request. Please provide 'question' in JSON format."}), 400
    try:
        response = App().chat(question)
    except Exception:
        logger.exception(f"Failed to chat {question=}")
        return jsonify({"error": "An error occurred. Please try again!"}), 500
    return jsonify({"data": response}), 200
if __name__ == "__main__":
    # Bind to all interfaces so the server is reachable from outside a container.
    app.run(host="0.0.0.0", port=5000, debug=False)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/discord_bot/discord_bot.py | embedchain/examples/discord_bot/discord_bot.py | import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
from embedchain import App
load_dotenv()
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="/ec ", intents=intents)
root_folder = os.getcwd()
def initialize_chat_bot():
global chat_bot
chat_bot = App()
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
initialize_chat_bot()
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await send_response(ctx, "Invalid command. Please refer to the documentation for correct syntax.")
else:
print("Error occurred during command execution:", error)
@bot.command()
async def add(ctx, data_type: str, *, url_or_text: str):
print(f"User: {ctx.author.name}, Data Type: {data_type}, URL/Text: {url_or_text}")
try:
chat_bot.add(data_type, url_or_text)
await send_response(ctx, f"Added {data_type} : {url_or_text}")
except Exception as e:
await send_response(ctx, f"Failed to add {data_type} : {url_or_text}")
print("Error occurred during 'add' command:", e)
@bot.command()
async def query(ctx, *, question: str):
print(f"User: {ctx.author.name}, Query: {question}")
try:
response = chat_bot.query(question)
await send_response(ctx, response)
except Exception as e:
await send_response(ctx, "An error occurred. Please try again!")
print("Error occurred during 'query' command:", e)
@bot.command()
async def chat(ctx, *, question: str):
print(f"User: {ctx.author.name}, Query: {question}")
try:
response = chat_bot.chat(question)
await send_response(ctx, response)
except Exception as e:
await send_response(ctx, "An error occurred. Please try again!")
print("Error occurred during 'chat' command:", e)
async def send_response(ctx, message):
if ctx.guild is None:
await ctx.send(message)
else:
await ctx.reply(message)
bot.run(os.environ["DISCORD_BOT_TOKEN"])
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/chainlit/app.py | embedchain/examples/chainlit/app.py | import os
import chainlit as cl
from embedchain import App
os.environ["OPENAI_API_KEY"] = "sk-xxx"
@cl.on_chat_start
async def on_chat_start():
app = App.from_config(
config={
"app": {"config": {"name": "chainlit-app"}},
"llm": {
"config": {
"stream": True,
}
},
}
)
# import your data here
app.add("https://www.forbes.com/profile/elon-musk/")
app.collect_metrics = False
cl.user_session.set("app", app)
@cl.on_message
async def on_message(message: cl.Message):
app = cl.user_session.get("app")
msg = cl.Message(content="")
for chunk in await cl.make_async(app.chat)(message.content):
await msg.stream_token(chunk)
await msg.send()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/telegram_bot/telegram_bot.py | embedchain/examples/telegram_bot/telegram_bot.py | import os
import requests
from dotenv import load_dotenv
from flask import Flask, request
from embedchain import App
app = Flask(__name__)
load_dotenv()
bot_token = os.environ["TELEGRAM_BOT_TOKEN"]
chat_bot = App()
@app.route("/", methods=["POST"])
def telegram_webhook():
data = request.json
message = data["message"]
chat_id = message["chat"]["id"]
text = message["text"]
if text.startswith("/start"):
response_text = (
"Welcome to Embedchain Bot! Try the following commands to use the bot:\n"
"For adding data sources:\n /add <data_type> <url_or_text>\n"
"For asking queries:\n /query <question>"
)
elif text.startswith("/add"):
_, data_type, url_or_text = text.split(maxsplit=2)
response_text = add_to_chat_bot(data_type, url_or_text)
elif text.startswith("/query"):
_, question = text.split(maxsplit=1)
response_text = query_chat_bot(question)
else:
response_text = "Invalid command. Please refer to the documentation for correct syntax."
send_message(chat_id, response_text)
return "OK"
def add_to_chat_bot(data_type, url_or_text):
try:
chat_bot.add(data_type, url_or_text)
response_text = f"Added {data_type} : {url_or_text}"
except Exception as e:
response_text = f"Failed to add {data_type} : {url_or_text}"
print("Error occurred during 'add' command:", e)
return response_text
def query_chat_bot(question):
try:
response = chat_bot.chat(question)
response_text = response
except Exception as e:
response_text = "An error occurred. Please try again!"
print("Error occurred during 'query' command:", e)
return response_text
def send_message(chat_id, text):
url = f"https://api.telegram.org/bot{bot_token}/sendMessage"
data = {"chat_id": chat_id, "text": text}
requests.post(url, json=data)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=False)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/sadhguru-ai/app.py | embedchain/examples/sadhguru-ai/app.py | import csv
import queue
import threading
from io import StringIO
import requests
import streamlit as st
from embedchain import App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import StreamingStdOutCallbackHandlerYield, generate
@st.cache_resource
def sadhguru_ai():
app = App()
return app
# Function to read the CSV file row by row
def read_csv_row_by_row(file_path):
with open(file_path, mode="r", newline="", encoding="utf-8") as file:
csv_reader = csv.DictReader(file)
for row in csv_reader:
yield row
@st.cache_resource
def add_data_to_app():
app = sadhguru_ai()
url = "https://gist.githubusercontent.com/deshraj/50b0597157e04829bbbb7bc418be6ccb/raw/95b0f1547028c39691f5c7db04d362baa597f3f4/data.csv" # noqa:E501
response = requests.get(url)
csv_file = StringIO(response.text)
for row in csv.reader(csv_file):
if row and row[0] != "url":
app.add(row[0], data_type="web_page")
app = sadhguru_ai()
add_data_to_app()
assistant_avatar_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Sadhguru-Jaggi-Vasudev.jpg/640px-Sadhguru-Jaggi-Vasudev.jpg" # noqa: E501
st.title("🙏 Sadhguru AI")
styled_caption = '<p style="font-size: 17px; color: #aaa;">🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Sadhguru\'s wisdom!</p>' # noqa: E501
st.markdown(styled_caption, unsafe_allow_html=True) # noqa: E501
if "messages" not in st.session_state:
st.session_state.messages = [
{
"role": "assistant",
"content": """
Hi, I'm Sadhguru AI! I'm a mystic, yogi, visionary, and spiritual master. I'm here to answer your questions about life, the universe, and everything.
""", # noqa: E501
}
]
for message in st.session_state.messages:
role = message["role"]
with st.chat_message(role, avatar=assistant_avatar_url if role == "assistant" else None):
st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("assistant", avatar=assistant_avatar_url):
msg_placeholder = st.empty()
msg_placeholder.markdown("Thinking...")
full_response = ""
q = queue.Queue()
def app_response(result):
config = BaseLlmConfig(stream=True, callbacks=[StreamingStdOutCallbackHandlerYield(q)])
answer, citations = app.chat(prompt, config=config, citations=True)
result["answer"] = answer
result["citations"] = citations
results = {}
thread = threading.Thread(target=app_response, args=(results,))
thread.start()
for answer_chunk in generate(q):
full_response += answer_chunk
msg_placeholder.markdown(full_response)
thread.join()
answer, citations = results["answer"], results["citations"]
if citations:
full_response += "\n\n**Sources**:\n"
sources = list(set(map(lambda x: x[1]["url"], citations)))
for i, source in enumerate(sources):
full_response += f"{i+1}. {source}\n"
msg_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/paths.py | embedchain/examples/full_stack/backend/paths.py | import os
ROOT_DIRECTORY = os.getcwd()
DB_DIRECTORY_OPEN_AI = os.path.join(os.getcwd(), "database", "open_ai")
DB_DIRECTORY_OPEN_SOURCE = os.path.join(os.getcwd(), "database", "open_source")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/models.py | embedchain/examples/full_stack/backend/models.py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class APIKey(db.Model):
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.String(255), nullable=False)
class BotList(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False)
slug = db.Column(db.String(255), nullable=False, unique=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/server.py | embedchain/examples/full_stack/backend/server.py | import os
from flask import Flask
from models import db
from paths import DB_DIRECTORY_OPEN_AI, ROOT_DIRECTORY
from routes.chat_response import chat_response_bp
from routes.dashboard import dashboard_bp
from routes.sources import sources_bp
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(ROOT_DIRECTORY, "database", "user_data.db")
app.register_blueprint(dashboard_bp)
app.register_blueprint(sources_bp)
app.register_blueprint(chat_response_bp)
# Initialize the app on startup
def load_app():
os.makedirs(DB_DIRECTORY_OPEN_AI, exist_ok=True)
db.init_app(app)
with app.app_context():
db.create_all()
if __name__ == "__main__":
load_app()
app.run(host="0.0.0.0", debug=True, port=8000)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/routes/dashboard.py | embedchain/examples/full_stack/backend/routes/dashboard.py | from flask import Blueprint, jsonify, make_response, request
from models import APIKey, BotList, db
dashboard_bp = Blueprint("dashboard", __name__)
# Set Open AI Key
@dashboard_bp.route("/api/set_key", methods=["POST"])
def set_key():
data = request.get_json()
api_key = data["openAIKey"]
existing_key = APIKey.query.first()
if existing_key:
existing_key.key = api_key
else:
new_key = APIKey(key=api_key)
db.session.add(new_key)
db.session.commit()
return make_response(jsonify(message="API key saved successfully"), 200)
# Check OpenAI Key
@dashboard_bp.route("/api/check_key", methods=["GET"])
def check_key():
existing_key = APIKey.query.first()
if existing_key:
return make_response(jsonify(status="ok", message="OpenAI Key exists"), 200)
else:
return make_response(jsonify(status="fail", message="No OpenAI Key present"), 200)
# Create a bot
@dashboard_bp.route("/api/create_bot", methods=["POST"])
def create_bot():
data = request.get_json()
name = data["name"]
slug = name.lower().replace(" ", "_")
existing_bot = BotList.query.filter_by(slug=slug).first()
if existing_bot:
return (make_response(jsonify(message="Bot already exists"), 400),)
new_bot = BotList(name=name, slug=slug)
db.session.add(new_bot)
db.session.commit()
return make_response(jsonify(message="Bot created successfully"), 200)
# Delete a bot
@dashboard_bp.route("/api/delete_bot", methods=["POST"])
def delete_bot():
data = request.get_json()
slug = data.get("slug")
bot = BotList.query.filter_by(slug=slug).first()
if bot:
db.session.delete(bot)
db.session.commit()
return make_response(jsonify(message="Bot deleted successfully"), 200)
return make_response(jsonify(message="Bot not found"), 400)
# Get the list of bots
@dashboard_bp.route("/api/get_bots", methods=["GET"])
def get_bots():
bots = BotList.query.all()
bot_list = []
for bot in bots:
bot_list.append(
{
"name": bot.name,
"slug": bot.slug,
}
)
return jsonify(bot_list)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/routes/sources.py | embedchain/examples/full_stack/backend/routes/sources.py | import os
from flask import Blueprint, jsonify, make_response, request
from models import APIKey
from paths import DB_DIRECTORY_OPEN_AI
from embedchain import App
sources_bp = Blueprint("sources", __name__)
# API route to add data sources
@sources_bp.route("/api/add_sources", methods=["POST"])
def add_sources():
try:
embedding_model = request.json.get("embedding_model")
name = request.json.get("name")
value = request.json.get("value")
if embedding_model == "open_ai":
os.chdir(DB_DIRECTORY_OPEN_AI)
api_key = APIKey.query.first().key
os.environ["OPENAI_API_KEY"] = api_key
chat_bot = App()
chat_bot.add(name, value)
return make_response(jsonify(message="Sources added successfully"), 200)
except Exception as e:
return make_response(jsonify(message=f"Error adding sources: {str(e)}"), 400)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/full_stack/backend/routes/chat_response.py | embedchain/examples/full_stack/backend/routes/chat_response.py | import os
from flask import Blueprint, jsonify, make_response, request
from models import APIKey
from paths import DB_DIRECTORY_OPEN_AI
from embedchain import App
chat_response_bp = Blueprint("chat_response", __name__)
# Chat Response for user query
@chat_response_bp.route("/api/get_answer", methods=["POST"])
def get_answer():
try:
data = request.get_json()
query = data.get("query")
embedding_model = data.get("embedding_model")
app_type = data.get("app_type")
if embedding_model == "open_ai":
os.chdir(DB_DIRECTORY_OPEN_AI)
api_key = APIKey.query.first().key
os.environ["OPENAI_API_KEY"] = api_key
if app_type == "app":
chat_bot = App()
response = chat_bot.chat(query)
return make_response(jsonify({"response": response}), 200)
except Exception as e:
return make_response(jsonify({"error": str(e)}), 400)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/services.py | embedchain/examples/rest-api/services.py | from models import AppModel
from sqlalchemy.orm import Session
def get_app(db: Session, app_id: str):
return db.query(AppModel).filter(AppModel.app_id == app_id).first()
def get_apps(db: Session, skip: int = 0, limit: int = 100):
return db.query(AppModel).offset(skip).limit(limit).all()
def save_app(db: Session, app_id: str, config: str):
db_app = AppModel(app_id=app_id, config=config)
db.add(db_app)
db.commit()
db.refresh(db_app)
return db_app
def remove_app(db: Session, app_id: str):
db_app = db.query(AppModel).filter(AppModel.app_id == app_id).first()
db.delete(db_app)
db.commit()
return db_app
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/models.py | embedchain/examples/rest-api/models.py | from typing import Optional
from database import Base
from pydantic import BaseModel, Field
from sqlalchemy import Column, Integer, String
class QueryApp(BaseModel):
query: str = Field("", description="The query that you want to ask the App.")
model_config = {
"json_schema_extra": {
"example": {
"query": "Who is Elon Musk?",
}
}
}
class SourceApp(BaseModel):
source: str = Field("", description="The source that you want to add to the App.")
data_type: Optional[str] = Field("", description="The type of data to add, remove it for autosense.")
model_config = {"json_schema_extra": {"example": {"source": "https://en.wikipedia.org/wiki/Elon_Musk"}}}
class DeployAppRequest(BaseModel):
api_key: str = Field("", description="The Embedchain API key for App deployments.")
model_config = {"json_schema_extra": {"example": {"api_key": "ec-xxx"}}}
class MessageApp(BaseModel):
message: str = Field("", description="The message that you want to send to the App.")
class DefaultResponse(BaseModel):
response: str
class AppModel(Base):
__tablename__ = "apps"
id = Column(Integer, primary_key=True, index=True)
app_id = Column(String, unique=True, index=True)
config = Column(String, unique=True, index=True)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/main.py | embedchain/examples/rest-api/main.py | import logging
import os
import aiofiles
import yaml
from database import Base, SessionLocal, engine
from fastapi import Depends, FastAPI, HTTPException, UploadFile
from models import DefaultResponse, DeployAppRequest, QueryApp, SourceApp
from services import get_app, get_apps, remove_app, save_app
from sqlalchemy.orm import Session
from utils import generate_error_message_for_api_keys
from embedchain import App
from embedchain.client import Client
logger = logging.getLogger(__name__)
Base.metadata.create_all(bind=engine)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
app = FastAPI(
title="Embedchain REST API",
description="This is the REST API for Embedchain.",
version="0.0.1",
license_info={
"name": "Apache 2.0",
"url": "https://github.com/embedchain/embedchain/blob/main/LICENSE",
},
)
@app.get("/ping", tags=["Utility"])
def check_status():
"""
Endpoint to check the status of the API
"""
return {"ping": "pong"}
@app.get("/apps", tags=["Apps"])
async def get_all_apps(db: Session = Depends(get_db)):
"""
Get all apps.
"""
apps = get_apps(db)
return {"results": apps}
@app.post("/create", tags=["Apps"], response_model=DefaultResponse)
async def create_app_using_default_config(app_id: str, config: UploadFile = None, db: Session = Depends(get_db)):
"""
Create a new app using App ID.
If you don't provide a config file, Embedchain will use the default config file\n
which uses opensource GPT4ALL model.\n
app_id: The ID of the app.\n
config: The YAML config file to create an App.\n
"""
try:
if app_id is None:
raise HTTPException(detail="App ID not provided.", status_code=400)
if get_app(db, app_id) is not None:
raise HTTPException(detail=f"App with id '{app_id}' already exists.", status_code=400)
yaml_path = "default.yaml"
if config is not None:
contents = await config.read()
try:
yaml.safe_load(contents)
# TODO: validate the config yaml file here
yaml_path = f"configs/{app_id}.yaml"
async with aiofiles.open(yaml_path, mode="w") as file_out:
await file_out.write(str(contents, "utf-8"))
except yaml.YAMLError as exc:
raise HTTPException(detail=f"Error parsing YAML: {exc}", status_code=400)
save_app(db, app_id, yaml_path)
return DefaultResponse(response=f"App created successfully. App ID: {app_id}")
except Exception as e:
logger.warning(str(e))
raise HTTPException(detail=f"Error creating app: {str(e)}", status_code=400)
@app.get(
"/{app_id}/data",
tags=["Apps"],
)
async def get_datasources_associated_with_app_id(app_id: str, db: Session = Depends(get_db)):
"""
Get all data sources for an app.\n
app_id: The ID of the app. Use "default" for the default app.\n
"""
try:
if app_id is None:
raise HTTPException(
detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
status_code=400,
)
db_app = get_app(db, app_id)
if db_app is None:
raise HTTPException(detail=f"App with id {app_id} does not exist, please create it first.", status_code=400)
app = App.from_config(config_path=db_app.config)
response = app.get_data_sources()
return {"results": response}
except ValueError as ve:
logger.warning(str(ve))
raise HTTPException(
detail=generate_error_message_for_api_keys(ve),
status_code=400,
)
except Exception as e:
logger.warning(str(e))
raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
@app.post(
"/{app_id}/add",
tags=["Apps"],
response_model=DefaultResponse,
)
async def add_datasource_to_an_app(body: SourceApp, app_id: str, db: Session = Depends(get_db)):
"""
Add a source to an existing app.\n
app_id: The ID of the app. Use "default" for the default app.\n
source: The source to add.\n
data_type: The data type of the source. Remove it if you want Embedchain to detect it automatically.\n
"""
try:
if app_id is None:
raise HTTPException(
detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
status_code=400,
)
db_app = get_app(db, app_id)
if db_app is None:
raise HTTPException(detail=f"App with id {app_id} does not exist, please create it first.", status_code=400)
app = App.from_config(config_path=db_app.config)
response = app.add(source=body.source, data_type=body.data_type)
return DefaultResponse(response=response)
except ValueError as ve:
logger.warning(str(ve))
raise HTTPException(
detail=generate_error_message_for_api_keys(ve),
status_code=400,
)
except Exception as e:
logger.warning(str(e))
raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
@app.post(
"/{app_id}/query",
tags=["Apps"],
response_model=DefaultResponse,
)
async def query_an_app(body: QueryApp, app_id: str, db: Session = Depends(get_db)):
"""
Query an existing app.\n
app_id: The ID of the app. Use "default" for the default app.\n
query: The query that you want to ask the App.\n
"""
try:
if app_id is None:
raise HTTPException(
detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
status_code=400,
)
db_app = get_app(db, app_id)
if db_app is None:
raise HTTPException(detail=f"App with id {app_id} does not exist, please create it first.", status_code=400)
app = App.from_config(config_path=db_app.config)
response = app.query(body.query)
return DefaultResponse(response=response)
except ValueError as ve:
logger.warning(str(ve))
raise HTTPException(
detail=generate_error_message_for_api_keys(ve),
status_code=400,
)
except Exception as e:
logger.warning(str(e))
raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
# FIXME: The chat implementation of Embedchain needs to be modified to work with the REST API.
# @app.post(
# "/{app_id}/chat",
# tags=["Apps"],
# response_model=DefaultResponse,
# )
# async def chat_with_an_app(body: MessageApp, app_id: str, db: Session = Depends(get_db)):
# """
# Query an existing app.\n
# app_id: The ID of the app. Use "default" for the default app.\n
# message: The message that you want to send to the App.\n
# """
# try:
# if app_id is None:
# raise HTTPException(
# detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
# status_code=400,
# )
# db_app = get_app(db, app_id)
# if db_app is None:
# raise HTTPException(
# detail=f"App with id {app_id} does not exist, please create it first.",
# status_code=400
# )
# app = App.from_config(config_path=db_app.config)
# response = app.chat(body.message)
# return DefaultResponse(response=response)
# except ValueError as ve:
# raise HTTPException(
# detail=generate_error_message_for_api_keys(ve),
# status_code=400,
# )
# except Exception as e:
# raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
@app.post(
"/{app_id}/deploy",
tags=["Apps"],
response_model=DefaultResponse,
)
async def deploy_app(body: DeployAppRequest, app_id: str, db: Session = Depends(get_db)):
"""
Query an existing app.\n
app_id: The ID of the app. Use "default" for the default app.\n
api_key: The API key to use for deployment. If not provided,
Embedchain will use the API key previously used (if any).\n
"""
try:
if app_id is None:
raise HTTPException(
detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
status_code=400,
)
db_app = get_app(db, app_id)
if db_app is None:
raise HTTPException(detail=f"App with id {app_id} does not exist, please create it first.", status_code=400)
app = App.from_config(config_path=db_app.config)
api_key = body.api_key
# this will save the api key in the embedchain.db
Client(api_key=api_key)
app.deploy()
return DefaultResponse(response="App deployed successfully.")
except ValueError as ve:
logger.warning(str(ve))
raise HTTPException(
detail=generate_error_message_for_api_keys(ve),
status_code=400,
)
except Exception as e:
logger.warning(str(e))
raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
@app.delete(
"/{app_id}/delete",
tags=["Apps"],
response_model=DefaultResponse,
)
async def delete_app(app_id: str, db: Session = Depends(get_db)):
"""
Delete an existing app.\n
app_id: The ID of the app to be deleted.
"""
try:
if app_id is None:
raise HTTPException(
detail="App ID not provided. If you want to use the default app, use 'default' as the app_id.",
status_code=400,
)
db_app = get_app(db, app_id)
if db_app is None:
raise HTTPException(detail=f"App with id {app_id} does not exist, please create it first.", status_code=400)
app = App.from_config(config_path=db_app.config)
# reset app.db
app.db.reset()
remove_app(db, app_id)
return DefaultResponse(response=f"App with id {app_id} deleted successfully.")
except Exception as e:
raise HTTPException(detail=f"Error occurred: {str(e)}", status_code=400)
if __name__ == "__main__":
import uvicorn
is_dev = os.getenv("DEVELOPMENT", "False")
uvicorn.run("main:app", host="0.0.0.0", port=8080, reload=bool(is_dev))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/utils.py | embedchain/examples/rest-api/utils.py | def generate_error_message_for_api_keys(error: ValueError) -> str:
env_mapping = {
"OPENAI_API_KEY": "OPENAI_API_KEY",
"OPENAI_API_TYPE": "OPENAI_API_TYPE",
"OPENAI_API_BASE": "OPENAI_API_BASE",
"OPENAI_API_VERSION": "OPENAI_API_VERSION",
"COHERE_API_KEY": "COHERE_API_KEY",
"TOGETHER_API_KEY": "TOGETHER_API_KEY",
"ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY",
"JINACHAT_API_KEY": "JINACHAT_API_KEY",
"HUGGINGFACE_ACCESS_TOKEN": "HUGGINGFACE_ACCESS_TOKEN",
"REPLICATE_API_TOKEN": "REPLICATE_API_TOKEN",
}
missing_keys = [env_mapping[key] for key in env_mapping if key in str(error)]
if missing_keys:
missing_keys_str = ", ".join(missing_keys)
return f"""Please set the {missing_keys_str} environment variable(s) when running the Docker container.
Example: `docker run -e {missing_keys[0]}=xxx embedchain/rest-api:latest`
"""
else:
return "Error: " + str(error)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/database.py | embedchain/examples/rest-api/database.py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URI = "sqlite:///./app.db"
engine = create_engine(SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/rest-api/__init__.py | embedchain/examples/rest-api/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/chat-pdf/app.py | embedchain/examples/chat-pdf/app.py | import os
import queue
import re
import tempfile
import threading
import streamlit as st
from embedchain import App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import StreamingStdOutCallbackHandlerYield, generate
def embedchain_bot(db_path, api_key):
return App.from_config(
config={
"llm": {
"provider": "openai",
"config": {
"model": "gpt-4o-mini",
"temperature": 0.5,
"max_tokens": 1000,
"top_p": 1,
"stream": True,
"api_key": api_key,
},
},
"vectordb": {
"provider": "chroma",
"config": {"collection_name": "chat-pdf", "dir": db_path, "allow_reset": True},
},
"embedder": {"provider": "openai", "config": {"api_key": api_key}},
"chunker": {"chunk_size": 2000, "chunk_overlap": 0, "length_function": "len"},
}
)
def get_db_path():
tmpdirname = tempfile.mkdtemp()
return tmpdirname
def get_ec_app(api_key):
if "app" in st.session_state:
print("Found app in session state")
app = st.session_state.app
else:
print("Creating app")
db_path = get_db_path()
app = embedchain_bot(db_path, api_key)
st.session_state.app = app
return app
with st.sidebar:
openai_access_token = st.text_input("OpenAI API Key", key="api_key", type="password")
"WE DO NOT STORE YOUR OPENAI KEY."
"Just paste your OpenAI API key here and we'll use it to power the chatbot. [Get your OpenAI API key](https://platform.openai.com/api-keys)" # noqa: E501
if st.session_state.api_key:
app = get_ec_app(st.session_state.api_key)
pdf_files = st.file_uploader("Upload your PDF files", accept_multiple_files=True, type="pdf")
add_pdf_files = st.session_state.get("add_pdf_files", [])
for pdf_file in pdf_files:
file_name = pdf_file.name
if file_name in add_pdf_files:
continue
try:
if not st.session_state.api_key:
st.error("Please enter your OpenAI API Key")
st.stop()
temp_file_name = None
with tempfile.NamedTemporaryFile(mode="wb", delete=False, prefix=file_name, suffix=".pdf") as f:
f.write(pdf_file.getvalue())
temp_file_name = f.name
if temp_file_name:
st.markdown(f"Adding {file_name} to knowledge base...")
app.add(temp_file_name, data_type="pdf_file")
st.markdown("")
add_pdf_files.append(file_name)
os.remove(temp_file_name)
st.session_state.messages.append({"role": "assistant", "content": f"Added {file_name} to knowledge base!"})
except Exception as e:
st.error(f"Error adding {file_name} to knowledge base: {e}")
st.stop()
st.session_state["add_pdf_files"] = add_pdf_files
st.title("📄 Embedchain - Chat with PDF")
styled_caption = '<p style="font-size: 17px; color: #aaa;">🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered by OpenAI!</p>' # noqa: E501
st.markdown(styled_caption, unsafe_allow_html=True)
if "messages" not in st.session_state:
st.session_state.messages = [
{
"role": "assistant",
"content": """
Hi! I'm chatbot powered by Embedchain, which can answer questions about your pdf documents.\n
Upload your pdf documents here and I'll answer your questions about them!
""",
}
]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
    if not st.session_state.api_key:
        st.error("Please enter your OpenAI API Key", icon="🤖")
        st.stop()
    app = get_ec_app(st.session_state.api_key)
    with st.chat_message("user"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.markdown(prompt)
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        # Tokens are streamed from the LLM worker thread to the UI via this queue.
        q = queue.Queue()

        def app_response(result):
            # Runs on a worker thread: rebuild the LLM config with a streaming
            # callback that pushes tokens into q, then run the chat call.
            llm_config = app.llm.config.as_dict()
            llm_config["callbacks"] = [StreamingStdOutCallbackHandlerYield(q=q)]
            config = BaseLlmConfig(**llm_config)
            answer, citations = app.chat(prompt, config=config, citations=True)
            result["answer"] = answer
            result["citations"] = citations

        results = {}
        thread = threading.Thread(target=app_response, args=(results,))
        thread.start()

        # Drain streamed tokens and render the answer incrementally.
        for answer_chunk in generate(q):
            full_response += answer_chunk
            msg_placeholder.markdown(full_response)

        thread.join()
        answer, citations = results["answer"], results["citations"]
        if citations:
            full_response += "\n\n**Sources**:\n"
            sources = []
            for i, citation in enumerate(citations):
                source = citation[1]["url"]
                # Presumably collapses the NamedTemporaryFile name back to the
                # original "<name>.pdf" — TODO confirm against ingestion paths.
                pattern = re.compile(r"([^/]+)\.[^\.]+\.pdf$")
                match = pattern.search(source)
                if match:
                    source = match.group(1) + ".pdf"
                sources.append(source)
            sources = list(set(sources))
            for source in sources:
                full_response += f"- {source}\n"
        msg_placeholder.markdown(full_response)
        print("Answer: ", full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/private-ai/privateai.py | embedchain/examples/private-ai/privateai.py | from embedchain import App
# Build the app from the local YAML config and ingest a folder of documents.
app = App.from_config("config.yaml")
app.add("/path/to/your/folder", data_type="directory")

# Interactive Q&A loop: keep answering until the user types 'exit'.
while (user_input := input("Enter your question (type 'exit' to quit): ")).lower() != "exit":
    print(app.chat(user_input))
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/whatsapp_bot/run.py | embedchain/examples/whatsapp_bot/run.py | from embedchain.bots.whatsapp import WhatsAppBot
def main():
    """Construct and start the WhatsApp bot."""
    WhatsAppBot().start()


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/whatsapp_bot/whatsapp_bot.py | embedchain/examples/whatsapp_bot/whatsapp_bot.py | from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse
from embedchain import App
# Flask app exposing the Twilio WhatsApp webhook.
app = Flask(__name__)
# Shared Embedchain app used for both ingestion ("add ...") and chat.
chat_bot = App()
@app.route("/chat", methods=["POST"])
def chat():
    """Twilio webhook: answer an incoming WhatsApp message with TwiML."""
    incoming = request.values.get("Body", "").lower()
    reply = MessagingResponse()
    reply.message(handle_message(incoming))
    return str(reply)
def handle_message(message):
    """Dispatch a message: 'add ...' ingests a source, anything else is a question."""
    return add_sources(message) if message.startswith("add ") else query(message)
def add_sources(message):
    """Handle an ``add <data_type> <url_or_text>`` command.

    Returns a human-readable status string for the WhatsApp reply.
    """
    message_parts = message.split(" ", 2)
    if len(message_parts) != 3:
        return "Invalid 'add' command format.\nUse: add <data_type> <url_or_text>"
    _, data_type, url_or_text = message_parts
    try:
        # embedchain's App.add takes the source first and the data type as a
        # keyword; the original call passed the two arguments in the wrong
        # order (add(data_type, url_or_text)).
        chat_bot.add(url_or_text, data_type=data_type)
        return f"Added {data_type}: {url_or_text}"
    except Exception as e:
        return f"Failed to add {data_type}: {url_or_text}.\nError: {str(e)}"
def query(message):
    """Answer a free-form question; never let an exception reach Twilio."""
    try:
        return chat_bot.chat(message)
    except Exception:
        return "An error occurred. Please try again!"
if __name__ == "__main__":
    # Bind on all interfaces so Twilio's webhook can reach the container.
    app.run(host="0.0.0.0", port=8000, debug=False)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/nextjs/nextjs_slack/app.py | embedchain/examples/nextjs/nextjs_slack/app.py | import logging
import os
import re
import requests
from dotenv import load_dotenv
from slack_bolt import App as SlackApp
from slack_bolt.adapter.socket_mode import SocketModeHandler
load_dotenv(".env")
logger = logging.getLogger(__name__)
def remove_mentions(message):
    """Strip Slack user mentions (``<@U123>``) from *message*.

    Returns the remaining text with surrounding whitespace removed. The
    original version called ``strip()`` but discarded its result (strings are
    immutable), so leading/trailing spaces leaked into the returned text.
    """
    return re.sub(r"<@[^>]+>", "", message).strip()
class SlackBotApp:
    """Thin client that forwards questions to a remote Embedchain app over HTTP."""

    def __init__(self) -> None:
        logger.info("Slack Bot using Embedchain!")

    def add(self, _):
        raise ValueError("Add is not implemented yet")

    def query(self, query, citations: bool = False):
        """POST the question to the EC app and return its parsed JSON reply.

        Returns an empty dict on a malformed body, or a fallback error string
        when the request itself fails.
        """
        endpoint = os.environ["EC_APP_URL"] + "/query"
        body = {"question": query, "citations": citations}
        try:
            raw = requests.request("POST", endpoint, json=body)
        except Exception:
            logger.exception(f"Failed to query {query}.")
            return "An error occurred. Please try again!"
        try:
            return raw.json()
        except Exception:
            logger.error(f"Failed to parse response: {raw}")
            return {}
# Socket Mode needs both an app-level token (for the websocket connection)
# and a bot token (for Web API calls).
SLACK_APP_TOKEN = os.environ["SLACK_APP_TOKEN"]
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
slack_app = SlackApp(token=SLACK_BOT_TOKEN)
slack_bot = SlackBotApp()
@slack_app.event("message")
def app_message_handler(message, say):
    # Intentionally a no-op: plain channel messages are ignored; the bot only
    # answers explicit @-mentions (see app_mention_handler below). Registering
    # the listener presumably also suppresses Bolt's unhandled-event warnings.
    pass
@slack_app.event("app_mention")
def app_mention_handler(body, say, client):
    """Answer an @-mention in a thread under the triggering message."""
    # Get the timestamp of the original message to reply in the thread.
    if "thread_ts" in body["event"]:
        # thread is already created
        thread_ts = body["event"]["thread_ts"]
        say(
            text="🧵 Currently, we don't support answering questions in threads. Could you please send your message in the channel for a swift response? Appreciate your understanding! 🚀",  # noqa: E501
            thread_ts=thread_ts,
        )
        return
    thread_ts = body["event"]["ts"]
    say(
        text="🎭 Putting on my thinking cap, brb with an epic response!",
        thread_ts=thread_ts,
    )
    query = body["event"]["text"]
    question = remove_mentions(query)
    print("Asking question: ", question)
    response = slack_bot.query(question, citations=True)
    default_answer = "Sorry, I don't know the answer to that question. Please refer to the documentation.\nhttps://nextjs.org/docs"  # noqa: E501
    answer = response.get("answer", default_answer)
    contexts = response.get("contexts", [])
    if contexts:
        # Deduplicate the cited source URLs before listing them.
        sources = list(set(map(lambda x: x[1]["url"], contexts)))
        answer += "\n\n*Sources*:\n"
        for source in sources:
            answer += f"- {source}\n"
    print("Sending answer: ", answer)
    result = say(text=answer, thread_ts=thread_ts)
    if result["ok"]:
        channel = result["channel"]
        timestamp = result["ts"]
        # Seed feedback reactions so users can rate the answer with one click.
        # (The original repeated four identical reactions_add calls.)
        for reaction in ("open_mouth", "thumbsup", "heart", "thumbsdown"):
            client.reactions_add(channel=channel, name=reaction, timestamp=timestamp)
def start_bot():
    """Connect to Slack over Socket Mode and block serving events."""
    slack_socket_mode_handler = SocketModeHandler(slack_app, SLACK_APP_TOKEN)
    slack_socket_mode_handler.start()


if __name__ == "__main__":
    start_bot()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/nextjs/ec_app/app.py | embedchain/examples/nextjs/ec_app/app.py | from dotenv import load_dotenv
from fastapi import FastAPI, responses
from pydantic import BaseModel
from embedchain import App
# Load environment variables (e.g. API keys) before constructing the app.
load_dotenv(".env")

app = FastAPI(title="Embedchain FastAPI App")
# Single shared Embedchain instance used by every endpoint below.
embedchain_app = App()
class SourceModel(BaseModel):
    # URL or local path of the data source to ingest.
    source: str


class QuestionModel(BaseModel):
    # Natural-language question to ask the knowledge base.
    question: str
@app.post("/add")
async def add_source(source_model: SourceModel):
    """
    Adds a new source to the EmbedChain app.
    Expects a JSON with a "source" key.
    """
    src = source_model.source
    embedchain_app.add(src)
    return {"message": f"Source '{src}' added successfully."}
@app.post("/query")
async def handle_query(question_model: QuestionModel):
    """
    Handles a query to the EmbedChain app.
    Expects a JSON with a "question" key.
    """
    return {"answer": embedchain_app.query(question_model.question)}
@app.post("/chat")
async def handle_chat(question_model: QuestionModel):
    """
    Handles a chat request to the EmbedChain app.
    Expects a JSON with a "question" key.
    """
    return {"response": embedchain_app.chat(question_model.question)}
@app.get("/")
async def root():
    # Redirect the bare root URL to FastAPI's interactive Swagger docs.
    return responses.RedirectResponse(url="/docs")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/nextjs/nextjs_discord/app.py | embedchain/examples/nextjs/nextjs_discord/app.py | import logging
import os
import discord
import dotenv
import requests
dotenv.load_dotenv(".env")

# The message-content intent is required to read the text of user messages.
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

# Bot display name used to detect when this bot is among a message's mentions.
discord_bot_name = os.environ["DISCORD_BOT_NAME"]
logger = logging.getLogger(__name__)
class NextJSBot:
    """Thin client that forwards questions to a remote Embedchain app over HTTP."""

    def __init__(self) -> None:
        logger.info("NextJS Bot powered with embedchain.")

    def add(self, _):
        raise ValueError("Add is not implemented yet")

    def query(self, message, citations: bool = False):
        """POST the question to the EC app; return parsed JSON (or a fallback).

        Returns an empty dict on a malformed body, or a fallback error string
        when the request itself fails.
        """
        endpoint = os.environ["EC_APP_URL"] + "/query"
        body = {"question": message, "citations": citations}
        try:
            raw = requests.request("POST", endpoint, json=body)
        except Exception:
            logger.exception(f"Failed to query {message}.")
            return "An error occurred. Please try again!"
        try:
            return raw.json()
        except Exception:
            logger.error(f"Failed to parse response: {raw}")
            return {}

    def start(self):
        """Run the Discord client; blocks until shutdown."""
        client.run(os.environ["DISCORD_BOT_TOKEN"])
# Single bot instance shared by the event handlers below.
NEXTJS_BOT = NextJSBot()


@client.event
async def on_ready():
    # Log the bot identity once the Discord gateway connection is established.
    logger.info(f"User {client.user.name} logged in with id: {client.user.id}!")
def _get_question(message):
user_ids = message.raw_mentions
if len(user_ids) > 0:
for user_id in user_ids:
# remove mentions from message
question = message.content.replace(f"<@{user_id}>", "").strip()
return question
async def answer_query(message):
    """Answer a Discord mention in a freshly created thread under the message.

    Questions asked inside an existing thread are declined; feedback emoji are
    attached to the final answer so readers can rate it.
    """
    # Decline questions asked inside an existing thread.
    if (
        message.channel.type == discord.ChannelType.public_thread
        or message.channel.type == discord.ChannelType.private_thread
    ):
        await message.channel.send(
            "🧵 Currently, we don't support answering questions in threads. Could you please send your message in the channel for a swift response? Appreciate your understanding! 🚀"  # noqa: E501
        )
        return
    question = _get_question(message)
    print("Answering question: ", question)
    # NOTE(review): _get_question can return None when the message has no
    # mentions; create_thread(name=None) would then fail. on_message only
    # dispatches on mentions, so this is presumably unreachable — confirm.
    thread = await message.create_thread(name=question)
    await thread.send("🎭 Putting on my thinking cap, brb with an epic response!")
    response = NEXTJS_BOT.query(question, citations=True)
    default_answer = "Sorry, I don't know the answer to that question. Please refer to the documentation.\nhttps://nextjs.org/docs"  # noqa: E501
    answer = response.get("answer", default_answer)
    contexts = response.get("contexts", [])
    if contexts:
        # Deduplicate the cited source URLs before listing them.
        sources = list(set(map(lambda x: x[1]["url"], contexts)))
        answer += "\n\n**Sources**:\n"
        for i, source in enumerate(sources):
            answer += f"- {source}\n"
    sent_message = await thread.send(answer)
    # Seed feedback reactions so readers can rate the answer with one click.
    await sent_message.add_reaction("😮")
    await sent_message.add_reaction("👍")
    await sent_message.add_reaction("❤️")
    await sent_message.add_reaction("👎")
@client.event
async def on_message(message):
    """Respond only when this bot is among the message's mentioned users."""
    addressed_to_bot = any(user.bot and user.name == discord_bot_name for user in message.mentions)
    if addressed_to_bot:
        await answer_query(message)
def start_bot():
    """Start the Discord client (blocks until the process exits)."""
    NEXTJS_BOT.start()


if __name__ == "__main__":
    start_bot()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/mistral-streamlit/app.py | embedchain/examples/mistral-streamlit/app.py | import os
import streamlit as st
from embedchain import App
@st.cache_resource
def ec_app():
    # Cached so the Embedchain app is built once per Streamlit server process,
    # not on every script rerun.
    return App.from_config(config_path="config.yaml")
with st.sidebar:
    huggingface_access_token = st.text_input("Hugging face Token", key="chatbot_api_key", type="password")
    # Bare string literals render as markdown here (Streamlit "magic").
    "[Get Hugging Face Access Token](https://huggingface.co/settings/tokens)"
    "[View the source code](https://github.com/embedchain/examples/mistral-streamlit)"

st.title("💬 Chatbot")
st.caption("🚀 An Embedchain app powered by Mistral!")

# Seed the conversation with a greeting on the first render of this session.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a chatbot. I can answer questions and learn new things!\n
        Ask me anything and if you want me to learn something do `/add <source>`.\n
        I can learn mostly everything. :)
        """,
        }
    ]

# Replay the chat history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
    if not st.session_state.chatbot_api_key:
        st.error("Please enter your Hugging Face Access Token")
        st.stop()
    # The token must be in the environment before the (cached) app is built.
    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = st.session_state.chatbot_api_key
    app = ec_app()
    # "/add <source>" ingests new data instead of asking a question.
    if prompt.startswith("/add"):
        with st.chat_message("user"):
            st.markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})
        prompt = prompt.replace("/add", "").strip()
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            message_placeholder.markdown("Adding to knowledge base...")
            app.add(prompt)
            message_placeholder.markdown(f"Added {prompt} to knowledge base!")
            st.session_state.messages.append({"role": "assistant", "content": f"Added {prompt} to knowledge base!"})
            st.stop()
    with st.chat_message("user"):
        st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""
        # NOTE(review): iterating app.chat(prompt) assumes streaming is
        # enabled in config.yaml so chunks are yielded; with a plain string
        # response this would iterate characters — confirm the config.
        for response in app.chat(prompt):
            msg_placeholder.empty()
            full_response += response
        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/embedchain/examples/unacademy-ai/app.py | embedchain/examples/unacademy-ai/app.py | import queue
import streamlit as st
from embedchain import App
from embedchain.config import BaseLlmConfig
from embedchain.helpers.callbacks import StreamingStdOutCallbackHandlerYield, generate
@st.cache_resource
def unacademy_ai():
    # Cached so the Embedchain app is constructed once per server process.
    app = App()
    return app


app = unacademy_ai()
assistant_avatar_url = "https://cdn-images-1.medium.com/v2/resize:fit:1200/1*LdFNhpOe7uIn-bHK9VUinA.jpeg"

st.markdown(f"# <img src='{assistant_avatar_url}' width={35} /> Unacademy UPSC AI", unsafe_allow_html=True)
styled_caption = """
<p style="font-size: 17px; color: #aaa;">
🚀 An <a href="https://github.com/embedchain/embedchain">Embedchain</a> app powered with Unacademy\'s UPSC data!
</p>
"""
st.markdown(styled_caption, unsafe_allow_html=True)

# Collapsible "build your own" code snippet.
with st.expander(":grey[Want to create your own Unacademy UPSC AI?]"):
    st.write(
        """
        ```bash
        pip install embedchain
        ```

        ```python
        from embedchain import App

        unacademy_ai_app = App()
        unacademy_ai_app.add(
            "https://unacademy.com/content/upsc/study-material/plan-policy/atma-nirbhar-bharat-3-0/",
            data_type="web_page"
        )
        unacademy_ai_app.chat("What is Atma Nirbhar 3.0?")
        ```

        For more information, checkout the [Embedchain docs](https://docs.embedchain.ai/get-started/quickstart).
        """
    )

# Seed the conversation with sample questions on the first render.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """Hi, I'm Unacademy UPSC AI bot, who can answer any questions related to UPSC preparation.
        Let me help you prepare better for UPSC.\n
        Sample questions:
        - What are the subjects in UPSC CSE?
        - What is the CSE scholarship price amount?
        - What are different indian calendar forms?
        """,
        }
    ]

# Replay chat history; assistant messages use the custom avatar.
for message in st.session_state.messages:
    role = message["role"]
    with st.chat_message(role, avatar=assistant_avatar_url if role == "assistant" else None):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask me anything!"):
    with st.chat_message("user"):
        st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant", avatar=assistant_avatar_url):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")
        full_response = ""

        # Tokens are streamed from the LLM worker thread to the UI via this queue.
        q = queue.Queue()

        def app_response(result):
            # Runs on a worker thread: rebuild the LLM config with a streaming
            # callback that pushes tokens into q, then run the chat call.
            llm_config = app.llm.config.as_dict()
            llm_config["callbacks"] = [StreamingStdOutCallbackHandlerYield(q=q)]
            config = BaseLlmConfig(**llm_config)
            answer, citations = app.chat(prompt, config=config, citations=True)
            result["answer"] = answer
            result["citations"] = citations

        # BUG FIX: app_response was defined but never invoked, so generate(q)
        # blocked on an empty queue and results["answer"] raised KeyError.
        # Run it on a background thread and join after draining the stream.
        import threading

        results = {}
        thread = threading.Thread(target=app_response, args=(results,))
        thread.start()

        for answer_chunk in generate(q):
            full_response += answer_chunk
            msg_placeholder.markdown(full_response)

        thread.join()
        answer, citations = results["answer"], results["citations"]
        if citations:
            full_response += "\n\n**Sources**:\n"
            sources = list(set(map(lambda x: x[1], citations)))
            for i, source in enumerate(sources):
                full_response += f"{i+1}. {source}\n"
        msg_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/test_memory_integration.py | tests/test_memory_integration.py | from unittest.mock import MagicMock, patch
from mem0.memory.main import Memory
def test_memory_configuration_without_env_vars():
    """Test Memory configuration with mock config instead of environment variables"""
    # Mock configuration without relying on environment variables
    mock_config = {
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4",
                "temperature": 0.1,
                "max_tokens": 1500,
            },
        },
        "vector_store": {
            "provider": "chroma",
            "config": {
                "collection_name": "test_collection",
                "path": "./test_db",
            },
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-ada-002",
            },
        },
    }
    # Test messages similar to the main.py file
    test_messages = [
        {"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
        {
            "role": "assistant",
            "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions.",
        },
    ]
    # Mock the Memory class methods to avoid actual API calls
    with patch.object(Memory, "__init__", return_value=None):
        with patch.object(Memory, "from_config") as mock_from_config:
            with patch.object(Memory, "add") as mock_add:
                with patch.object(Memory, "get_all") as mock_get_all:
                    # Configure mocks
                    mock_memory_instance = MagicMock()
                    mock_from_config.return_value = mock_memory_instance
                    mock_add.return_value = {
                        "results": [
                            {"id": "1", "text": "Alex is a vegetarian"},
                            {"id": "2", "text": "Alex is allergic to nuts"},
                        ]
                    }
                    mock_get_all.return_value = [
                        {"id": "1", "text": "Alex is a vegetarian", "metadata": {"category": "dietary_preferences"}},
                        {"id": "2", "text": "Alex is allergic to nuts", "metadata": {"category": "allergies"}},
                    ]
                    # Test the workflow
                    mem = Memory.from_config(config_dict=mock_config)
                    assert mem is not None
                    # Test adding memories
                    # NOTE(review): mock_add / mock_get_all are invoked
                    # directly here, so the assertions below only verify the
                    # mocks' own return values, not Memory's real add/get_all
                    # behavior — consider calling mem.add(...)/mem.get_all(...).
                    result = mock_add(test_messages, user_id="alice", metadata={"category": "book_recommendations"})
                    assert "results" in result
                    assert len(result["results"]) == 2
                    # Test retrieving memories
                    all_memories = mock_get_all(user_id="alice")
                    assert len(all_memories) == 2
                    assert any("vegetarian" in memory["text"] for memory in all_memories)
                    assert any("allergic to nuts" in memory["text"] for memory in all_memories)
def test_azure_config_structure():
    """Test that Azure configuration structure is properly formatted"""
    # Representative Azure configuration (placeholder credentials only).
    azure_config = {
        "llm": {
            "provider": "azure_openai",
            "config": {
                "model": "gpt-4",
                "temperature": 0.1,
                "max_tokens": 1500,
                "azure_kwargs": {
                    "azure_deployment": "test-deployment",
                    "api_version": "2023-12-01-preview",
                    "azure_endpoint": "https://test.openai.azure.com/",
                    "api_key": "test-key",
                },
            },
        },
        "vector_store": {
            "provider": "azure_ai_search",
            "config": {
                "service_name": "test-service",
                "api_key": "test-key",
                "collection_name": "test-collection",
                "embedding_model_dims": 1536,
            },
        },
        "embedder": {
            "provider": "azure_openai",
            "config": {
                "model": "text-embedding-ada-002",
                "api_key": "test-key",
                "azure_kwargs": {
                    "api_version": "2023-12-01-preview",
                    "azure_deployment": "test-embedding-deployment",
                    "azure_endpoint": "https://test.openai.azure.com/",
                    "api_key": "test-key",
                },
            },
        },
    }

    # All three top-level sections must be present.
    for section in ("llm", "vector_store", "embedder"):
        assert section in azure_config

    # LLM: Azure provider with deployment details under azure_kwargs.
    llm = azure_config["llm"]
    assert llm["provider"] == "azure_openai"
    assert "azure_kwargs" in llm["config"]
    assert "azure_deployment" in llm["config"]["azure_kwargs"]

    # Vector store: Azure AI Search with a named service.
    store = azure_config["vector_store"]
    assert store["provider"] == "azure_ai_search"
    assert "service_name" in store["config"]

    # Embedder: Azure provider with its own azure_kwargs.
    embedder = azure_config["embedder"]
    assert embedder["provider"] == "azure_openai"
    assert "azure_kwargs" in embedder["config"]
def test_memory_messages_format():
    """Test that memory messages are properly formatted"""
    messages = [
        {"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
        {
            "role": "assistant",
            "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions.",
        },
    ]

    # Structure: one user turn followed by one assistant turn, each shaped
    # like an OpenAI-style chat message.
    assert len(messages) == 2
    for msg in messages:
        assert "role" in msg
        assert "content" in msg
    assert [m["role"] for m in messages] == ["user", "assistant"]

    # Content: the dietary facts must appear in both turns.
    user_text = messages[0]["content"].lower()
    assistant_text = messages[1]["content"].lower()
    assert "vegetarian" in user_text
    assert "allergic to nuts" in user_text
    assert "vegetarian" in assistant_text
    assert "nut allergy" in assistant_text
def test_safe_update_prompt_constant():
    """Test the SAFE_UPDATE_PROMPT constant from main.py"""
    SAFE_UPDATE_PROMPT = """
    Based on the user's latest messages, what new preference can be inferred?
    Reply only in this json_object format:
    """

    # The prompt must be a non-empty string mentioning both key phrases.
    assert isinstance(SAFE_UPDATE_PROMPT, str)
    for phrase in ("user's latest messages", "json_object format"):
        assert phrase in SAFE_UPDATE_PROMPT
    assert SAFE_UPDATE_PROMPT.strip()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/test_main.py | tests/test_main.py | import os
from unittest.mock import Mock, patch
import pytest
from mem0.configs.base import MemoryConfig
from mem0.memory.main import Memory
@pytest.fixture(autouse=True)
def mock_openai():
    # Every test gets a dummy API key and a patched OpenAI client so no test
    # can accidentally hit the real API.
    os.environ["OPENAI_API_KEY"] = "123"
    with patch("openai.OpenAI") as mock:
        mock.return_value = Mock()
        yield mock
@pytest.fixture
def memory_instance():
    """Memory wired entirely to mocks (embedder, vector store, LLM, graph)."""
    with (
        patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
        patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
        patch("mem0.utils.factory.LlmFactory") as mock_llm,
        patch("mem0.memory.telemetry.capture_event"),
        patch("mem0.memory.graph_memory.MemoryGraph"),
        patch("mem0.memory.main.GraphStoreFactory") as mock_graph_store,
    ):
        mock_embedder.create.return_value = Mock()
        mock_vector_store.create.return_value = Mock()
        # Empty search result so add() sees no pre-existing memories.
        mock_vector_store.create.return_value.search.return_value = []
        mock_llm.create.return_value = Mock()
        # Create a mock instance that won't try to access config attributes
        mock_graph_instance = Mock()
        mock_graph_store.create.return_value = mock_graph_instance
        config = MemoryConfig(version="v1.1")
        config.graph_store.config = {"some_config": "value"}
        return Memory(config)
@pytest.fixture
def memory_custom_instance():
    """Like memory_instance, but with custom fact-extraction and update prompts."""
    with (
        patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
        patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
        patch("mem0.utils.factory.LlmFactory") as mock_llm,
        patch("mem0.memory.telemetry.capture_event"),
        patch("mem0.memory.graph_memory.MemoryGraph"),
        patch("mem0.memory.main.GraphStoreFactory") as mock_graph_store,
    ):
        mock_embedder.create.return_value = Mock()
        mock_vector_store.create.return_value = Mock()
        # Empty search result so add() sees no pre-existing memories.
        mock_vector_store.create.return_value.search.return_value = []
        mock_llm.create.return_value = Mock()
        # Create a mock instance that won't try to access config attributes
        mock_graph_instance = Mock()
        mock_graph_store.create.return_value = mock_graph_instance
        config = MemoryConfig(
            version="v1.1",
            custom_fact_extraction_prompt="custom prompt extracting memory",
            custom_update_memory_prompt="custom prompt determining memory update",
        )
        config.graph_store.config = {"some_config": "value"}
        return Memory(config)
@pytest.mark.parametrize("version, enable_graph", [("v1.0", False), ("v1.1", True)])
def test_add(memory_instance, version, enable_graph):
    """add() returns vector results, plus graph relations when graph is enabled."""
    memory_instance.config.version = version
    memory_instance.enable_graph = enable_graph
    memory_instance._add_to_vector_store = Mock(return_value=[{"memory": "Test memory", "event": "ADD"}])
    memory_instance._add_to_graph = Mock(return_value=[])
    result = memory_instance.add(messages=[{"role": "user", "content": "Test message"}], user_id="test_user")
    if enable_graph:
        assert "results" in result
        assert result["results"] == [{"memory": "Test memory", "event": "ADD"}]
        assert "relations" in result
        assert result["relations"] == []
    else:
        assert "results" in result
        assert result["results"] == [{"memory": "Test memory", "event": "ADD"}]
    memory_instance._add_to_vector_store.assert_called_once_with(
        [{"role": "user", "content": "Test message"}], {"user_id": "test_user"}, {"user_id": "test_user"}, True
    )
    # Remove the conditional assertion for _add_to_graph
    # _add_to_graph is invoked regardless of enable_graph; only the presence
    # of "relations" in the response depends on the flag.
    memory_instance._add_to_graph.assert_called_once_with(
        [{"role": "user", "content": "Test message"}], {"user_id": "test_user"}
    )
def test_get(memory_instance):
    """get() maps known payload keys to top-level fields and the rest to metadata."""
    mock_memory = Mock(
        id="test_id",
        payload={
            "data": "Test memory",
            "user_id": "test_user",
            "hash": "test_hash",
            "created_at": "2023-01-01T00:00:00",
            "updated_at": "2023-01-02T00:00:00",
            "extra_field": "extra_value",
        },
    )
    memory_instance.vector_store.get = Mock(return_value=mock_memory)
    result = memory_instance.get("test_id")
    assert result["id"] == "test_id"
    assert result["memory"] == "Test memory"
    assert result["user_id"] == "test_user"
    assert result["hash"] == "test_hash"
    assert result["created_at"] == "2023-01-01T00:00:00"
    assert result["updated_at"] == "2023-01-02T00:00:00"
    # Any unrecognized payload key must be surfaced under "metadata".
    assert result["metadata"] == {"extra_field": "extra_value"}
@pytest.mark.parametrize("version, enable_graph", [("v1.0", False), ("v1.1", True)])
def test_search(memory_instance, version, enable_graph):
    """search() embeds the query, queries the vector store, and optionally the graph."""
    memory_instance.config.version = version
    memory_instance.enable_graph = enable_graph
    mock_memories = [
        Mock(id="1", payload={"data": "Memory 1", "user_id": "test_user"}, score=0.9),
        Mock(id="2", payload={"data": "Memory 2", "user_id": "test_user"}, score=0.8),
    ]
    memory_instance.vector_store.search = Mock(return_value=mock_memories)
    memory_instance.embedding_model.embed = Mock(return_value=[0.1, 0.2, 0.3])
    memory_instance.graph.search = Mock(return_value=[{"relation": "test_relation"}])
    result = memory_instance.search("test query", user_id="test_user")
    if version == "v1.1":
        assert "results" in result
        assert len(result["results"]) == 2
        assert result["results"][0]["id"] == "1"
        assert result["results"][0]["memory"] == "Memory 1"
        assert result["results"][0]["user_id"] == "test_user"
        assert result["results"][0]["score"] == 0.9
        # "relations" only appears in the response when graph mode is on.
        if enable_graph:
            assert "relations" in result
            assert result["relations"] == [{"relation": "test_relation"}]
        else:
            assert "relations" not in result
    else:
        assert isinstance(result, dict)
        assert "results" in result
        assert len(result["results"]) == 2
        assert result["results"][0]["id"] == "1"
        assert result["results"][0]["memory"] == "Memory 1"
        assert result["results"][0]["user_id"] == "test_user"
        assert result["results"][0]["score"] == 0.9
    # The query embedding must be tagged with the "search" memory action.
    memory_instance.vector_store.search.assert_called_once_with(
        query="test query", vectors=[0.1, 0.2, 0.3], limit=100, filters={"user_id": "test_user"}
    )
    memory_instance.embedding_model.embed.assert_called_once_with("test query", "search")
    if enable_graph:
        memory_instance.graph.search.assert_called_once_with("test query", {"user_id": "test_user"}, 100)
    else:
        memory_instance.graph.search.assert_not_called()
def test_update(memory_instance):
    """update() embeds the new text and delegates to _update_memory."""
    embedder = Mock()
    embedder.embed = Mock(return_value=[0.1, 0.2, 0.3])
    memory_instance.embedding_model = embedder
    memory_instance._update_memory = Mock()

    outcome = memory_instance.update("test_id", "Updated memory")

    memory_instance._update_memory.assert_called_once_with(
        "test_id", "Updated memory", {"Updated memory": [0.1, 0.2, 0.3]}
    )
    assert outcome["message"] == "Memory updated successfully!"
def test_delete(memory_instance):
    """delete() removes the memory by id and confirms with a message."""
    memory_instance._delete_memory = Mock()

    outcome = memory_instance.delete("test_id")

    memory_instance._delete_memory.assert_called_once_with("test_id")
    assert outcome["message"] == "Memory deleted successfully!"
@pytest.mark.parametrize("version, enable_graph", [("v1.0", False), ("v1.1", True)])
def test_delete_all(memory_instance, version, enable_graph):
    """delete_all() deletes every listed memory; the graph is wiped only when enabled."""
    memory_instance.config.version = version
    memory_instance.enable_graph = enable_graph
    mock_memories = [Mock(id="1"), Mock(id="2")]
    # vector_store.list returns a (memories, cursor) tuple.
    memory_instance.vector_store.list = Mock(return_value=(mock_memories, None))
    memory_instance._delete_memory = Mock()
    memory_instance.graph.delete_all = Mock()
    result = memory_instance.delete_all(user_id="test_user")
    # One _delete_memory call per listed memory.
    assert memory_instance._delete_memory.call_count == 2
    if enable_graph:
        memory_instance.graph.delete_all.assert_called_once_with({"user_id": "test_user"})
    else:
        memory_instance.graph.delete_all.assert_not_called()
    assert result["message"] == "Memories deleted successfully!"
@pytest.mark.parametrize(
    "version, enable_graph, expected_result",
    [
        ("v1.0", False, {"results": [{"id": "1", "memory": "Memory 1", "user_id": "test_user"}]}),
        ("v1.1", False, {"results": [{"id": "1", "memory": "Memory 1", "user_id": "test_user"}]}),
        (
            "v1.1",
            True,
            {
                "results": [{"id": "1", "memory": "Memory 1", "user_id": "test_user"}],
                "relations": [{"source": "entity1", "relationship": "rel", "target": "entity2"}],
            },
        ),
    ],
)
def test_get_all(memory_instance, version, enable_graph, expected_result):
    """get_all() returns listed memories, plus graph relations when enabled."""
    memory_instance.config.version = version
    memory_instance.enable_graph = enable_graph
    mock_memories = [Mock(id="1", payload={"data": "Memory 1", "user_id": "test_user"})]
    # vector_store.list returns a (memories, cursor) tuple.
    memory_instance.vector_store.list = Mock(return_value=(mock_memories, None))
    memory_instance.graph.get_all = Mock(
        return_value=[{"source": "entity1", "relationship": "rel", "target": "entity2"}]
    )
    result = memory_instance.get_all(user_id="test_user")
    assert isinstance(result, dict)
    assert "results" in result
    assert len(result["results"]) == len(expected_result["results"])
    # Compare each returned memory field-by-field against the expectation.
    for expected_item, result_item in zip(expected_result["results"], result["results"]):
        assert all(key in result_item for key in expected_item)
        assert result_item["id"] == expected_item["id"]
        assert result_item["memory"] == expected_item["memory"]
        assert result_item["user_id"] == expected_item["user_id"]
    if enable_graph:
        assert "relations" in result
        assert result["relations"] == expected_result["relations"]
    else:
        assert "relations" not in result
    memory_instance.vector_store.list.assert_called_once_with(filters={"user_id": "test_user"}, limit=100)
    if enable_graph:
        memory_instance.graph.get_all.assert_called_once_with({"user_id": "test_user"}, 100)
    else:
        memory_instance.graph.get_all.assert_not_called()
def test_custom_prompts(memory_instance_unused=None):
    """Placeholder docstring removed below; see actual def."""
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/test_proxy.py | tests/test_proxy.py | from unittest.mock import Mock, patch
import pytest
from mem0 import Memory, MemoryClient
from mem0.proxy.main import Chat, Completions, Mem0
@pytest.fixture
def mock_memory_client():
mock_client = Mock(spec=MemoryClient)
mock_client.user_email = None
return mock_client
@pytest.fixture
def mock_openai_embedding_client():
with patch("mem0.embeddings.openai.OpenAI") as mock_openai:
mock_client = Mock()
mock_openai.return_value = mock_client
yield mock_client
@pytest.fixture
def mock_openai_llm_client():
with patch("mem0.llms.openai.OpenAI") as mock_openai:
mock_client = Mock()
mock_openai.return_value = mock_client
yield mock_client
@pytest.fixture
def mock_litellm():
with patch("mem0.proxy.main.litellm") as mock:
yield mock
def test_mem0_initialization_with_api_key(mock_openai_embedding_client, mock_openai_llm_client):
mem0 = Mem0()
assert isinstance(mem0.mem0_client, Memory)
assert isinstance(mem0.chat, Chat)
def test_mem0_initialization_with_config():
config = {"some_config": "value"}
with patch("mem0.Memory.from_config") as mock_from_config:
mem0 = Mem0(config=config)
mock_from_config.assert_called_once_with(config)
assert isinstance(mem0.chat, Chat)
def test_mem0_initialization_without_params(mock_openai_embedding_client, mock_openai_llm_client):
mem0 = Mem0()
assert isinstance(mem0.mem0_client, Memory)
assert isinstance(mem0.chat, Chat)
def test_chat_initialization(mock_memory_client):
chat = Chat(mock_memory_client)
assert isinstance(chat.completions, Completions)
def test_completions_create(mock_memory_client, mock_litellm):
completions = Completions(mock_memory_client)
messages = [{"role": "user", "content": "Hello, how are you?"}]
mock_memory_client.search.return_value = [{"memory": "Some relevant memory"}]
mock_litellm.completion.return_value = {"choices": [{"message": {"content": "I'm doing well, thank you!"}}]}
mock_litellm.supports_function_calling.return_value = True
response = completions.create(model="gpt-4.1-nano-2025-04-14", messages=messages, user_id="test_user", temperature=0.7)
mock_memory_client.add.assert_called_once()
mock_memory_client.search.assert_called_once()
mock_litellm.completion.assert_called_once()
call_args = mock_litellm.completion.call_args[1]
assert call_args["model"] == "gpt-4.1-nano-2025-04-14"
assert len(call_args["messages"]) == 2
assert call_args["temperature"] == 0.7
assert response == {"choices": [{"message": {"content": "I'm doing well, thank you!"}}]}
def test_completions_create_with_system_message(mock_memory_client, mock_litellm):
completions = Completions(mock_memory_client)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello, how are you?"},
]
mock_memory_client.search.return_value = [{"memory": "Some relevant memory"}]
mock_litellm.completion.return_value = {"choices": [{"message": {"content": "I'm doing well, thank you!"}}]}
mock_litellm.supports_function_calling.return_value = True
completions.create(model="gpt-4.1-nano-2025-04-14", messages=messages, user_id="test_user")
call_args = mock_litellm.completion.call_args[1]
assert call_args["messages"][0]["role"] == "system"
assert call_args["messages"][0]["content"] == "You are a helpful assistant."
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/test_memory.py | tests/test_memory.py | from unittest.mock import MagicMock, patch
import pytest
from mem0 import Memory
from mem0.configs.base import MemoryConfig
class MockVectorMemory:
"""Mock memory object for testing incomplete payloads."""
def __init__(self, memory_id: str, payload: dict, score: float = 0.8):
self.id = memory_id
self.payload = payload
self.score = score
@pytest.fixture
def memory_client():
with patch.object(Memory, "__init__", return_value=None):
client = Memory()
client.add = MagicMock(return_value={"results": [{"id": "1", "memory": "Name is John Doe.", "event": "ADD"}]})
client.get = MagicMock(return_value={"id": "1", "memory": "Name is John Doe."})
client.update = MagicMock(return_value={"message": "Memory updated successfully!"})
client.delete = MagicMock(return_value={"message": "Memory deleted successfully!"})
client.history = MagicMock(return_value=[{"memory": "I like Indian food."}, {"memory": "I like Italian food."}])
client.get_all = MagicMock(return_value=["Name is John Doe.", "Name is John Doe. I like to code in Python."])
yield client
def test_create_memory(memory_client):
data = "Name is John Doe."
result = memory_client.add([{"role": "user", "content": data}], user_id="test_user")
assert result["results"][0]["memory"] == data
def test_get_memory(memory_client):
data = "Name is John Doe."
memory_client.add([{"role": "user", "content": data}], user_id="test_user")
result = memory_client.get("1")
assert result["memory"] == data
def test_update_memory(memory_client):
data = "Name is John Doe."
memory_client.add([{"role": "user", "content": data}], user_id="test_user")
new_data = "Name is John Kapoor."
update_result = memory_client.update("1", text=new_data)
assert update_result["message"] == "Memory updated successfully!"
def test_delete_memory(memory_client):
data = "Name is John Doe."
memory_client.add([{"role": "user", "content": data}], user_id="test_user")
delete_result = memory_client.delete("1")
assert delete_result["message"] == "Memory deleted successfully!"
def test_history(memory_client):
data = "I like Indian food."
memory_client.add([{"role": "user", "content": data}], user_id="test_user")
memory_client.update("1", text="I like Italian food.")
history = memory_client.history("1")
assert history[0]["memory"] == "I like Indian food."
assert history[1]["memory"] == "I like Italian food."
def test_list_memories(memory_client):
data1 = "Name is John Doe."
data2 = "Name is John Doe. I like to code in Python."
memory_client.add([{"role": "user", "content": data1}], user_id="test_user")
memory_client.add([{"role": "user", "content": data2}], user_id="test_user")
memories = memory_client.get_all(user_id="test_user")
assert data1 in memories
assert data2 in memories
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_collection_name_preserved_after_reset(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
mock_embedder_factory.return_value = MagicMock()
mock_vector_store = MagicMock()
mock_vector_factory.return_value = mock_vector_store
mock_llm_factory.return_value = MagicMock()
mock_sqlite.return_value = MagicMock()
test_collection_name = "mem0"
config = MemoryConfig()
config.vector_store.config.collection_name = test_collection_name
memory = Memory(config)
assert memory.collection_name == test_collection_name
assert memory.config.vector_store.config.collection_name == test_collection_name
memory.reset()
assert memory.collection_name == test_collection_name
assert memory.config.vector_store.config.collection_name == test_collection_name
reset_calls = [call for call in mock_vector_factory.call_args_list if len(mock_vector_factory.call_args_list) > 2]
if reset_calls:
reset_config = reset_calls[-1][0][1]
assert reset_config.collection_name == test_collection_name, f"Reset used wrong collection name: {reset_config.collection_name}"
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_search_handles_incomplete_payloads(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
"""Test that search operations handle memory objects with missing 'data' key gracefully."""
mock_embedder_factory.return_value = MagicMock()
mock_vector_store = MagicMock()
mock_vector_factory.return_value = mock_vector_store
mock_llm_factory.return_value = MagicMock()
mock_sqlite.return_value = MagicMock()
from mem0.memory.main import Memory as MemoryClass
config = MemoryConfig()
memory = MemoryClass(config)
# Create test data with both complete and incomplete payloads
incomplete_memory = MockVectorMemory("mem_1", {"hash": "abc123"})
complete_memory = MockVectorMemory("mem_2", {"data": "content", "hash": "def456"})
mock_vector_store.search.return_value = [incomplete_memory, complete_memory]
mock_embedder = MagicMock()
mock_embedder.embed.return_value = [0.1, 0.2, 0.3]
memory.embedding_model = mock_embedder
result = memory._search_vector_store("test", {"user_id": "test"}, 10)
assert len(result) == 2
memories_by_id = {mem["id"]: mem for mem in result}
assert memories_by_id["mem_1"]["memory"] == ""
assert memories_by_id["mem_2"]["memory"] == "content"
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_get_all_handles_nested_list_from_chroma(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
"""
Test that get_all() handles nested list return from Chroma/Milvus.
Issue #3674: Some vector stores return [[mem1, mem2]] instead of [mem1, mem2]
This test ensures the unified unwrapping logic handles this correctly.
"""
mock_embedder_factory.return_value = MagicMock()
mock_vector_store = MagicMock()
mock_vector_factory.return_value = mock_vector_store
mock_llm_factory.return_value = MagicMock()
mock_sqlite.return_value = MagicMock()
from mem0.memory.main import Memory as MemoryClass
config = MemoryConfig()
memory = MemoryClass(config)
# Create test data
mem1 = MockVectorMemory("mem_1", {"data": "My dog name is Sheru"})
mem2 = MockVectorMemory("mem_2", {"data": "I like to code in Python"})
mem3 = MockVectorMemory("mem_3", {"data": "I live in California"})
# Chroma/Milvus returns nested list: [[mem1, mem2, mem3]]
mock_vector_store.list.return_value = [[mem1, mem2, mem3]]
result = memory._get_all_from_vector_store({"user_id": "test"}, 100)
# Should successfully unwrap and return 3 memories
assert len(result) == 3
assert result[0]["memory"] == "My dog name is Sheru"
assert result[1]["memory"] == "I like to code in Python"
assert result[2]["memory"] == "I live in California"
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_get_all_handles_tuple_from_qdrant(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
"""
Test that get_all() handles tuple return from Qdrant.
Qdrant returns: ([mem1, mem2], count)
Should unwrap to [mem1, mem2]
"""
mock_embedder_factory.return_value = MagicMock()
mock_vector_store = MagicMock()
mock_vector_factory.return_value = mock_vector_store
mock_llm_factory.return_value = MagicMock()
mock_sqlite.return_value = MagicMock()
from mem0.memory.main import Memory as MemoryClass
config = MemoryConfig()
memory = MemoryClass(config)
mem1 = MockVectorMemory("mem_1", {"data": "Memory 1"})
mem2 = MockVectorMemory("mem_2", {"data": "Memory 2"})
# Qdrant returns tuple: ([mem1, mem2], count)
mock_vector_store.list.return_value = ([mem1, mem2], 100)
result = memory._get_all_from_vector_store({"user_id": "test"}, 100)
assert len(result) == 2
assert result[0]["memory"] == "Memory 1"
assert result[1]["memory"] == "Memory 2"
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_get_all_handles_flat_list_from_postgres(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
"""
Test that get_all() handles flat list return from PostgreSQL.
PostgreSQL returns: [mem1, mem2]
Should keep as-is without unwrapping
"""
mock_embedder_factory.return_value = MagicMock()
mock_vector_store = MagicMock()
mock_vector_factory.return_value = mock_vector_store
mock_llm_factory.return_value = MagicMock()
mock_sqlite.return_value = MagicMock()
from mem0.memory.main import Memory as MemoryClass
config = MemoryConfig()
memory = MemoryClass(config)
mem1 = MockVectorMemory("mem_1", {"data": "Memory 1"})
mem2 = MockVectorMemory("mem_2", {"data": "Memory 2"})
# PostgreSQL returns flat list: [mem1, mem2]
mock_vector_store.list.return_value = [mem1, mem2]
result = memory._get_all_from_vector_store({"user_id": "test"}, 100)
assert len(result) == 2
assert result[0]["memory"] == "Memory 1"
assert result[1]["memory"] == "Memory 2"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/__init__.py | tests/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/test_telemetry.py | tests/test_telemetry.py | import os
from unittest.mock import patch
import pytest
MEM0_TELEMETRY = os.environ.get("MEM0_TELEMETRY", "True")
if isinstance(MEM0_TELEMETRY, str):
MEM0_TELEMETRY = MEM0_TELEMETRY.lower() in ("true", "1", "yes")
def use_telemetry():
if os.getenv("MEM0_TELEMETRY", "true").lower() == "true":
return True
return False
@pytest.fixture(autouse=True)
def reset_env():
with patch.dict(os.environ, {}, clear=True):
yield
def test_telemetry_enabled():
with patch.dict(os.environ, {"MEM0_TELEMETRY": "true"}):
assert use_telemetry() is True
def test_telemetry_disabled():
with patch.dict(os.environ, {"MEM0_TELEMETRY": "false"}):
assert use_telemetry() is False
def test_telemetry_default_enabled():
assert use_telemetry() is True
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_neptune_memory.py | tests/memory/test_neptune_memory.py | import unittest
from unittest.mock import MagicMock, patch
import pytest
from mem0.graphs.neptune.neptunedb import MemoryGraph
from mem0.graphs.neptune.base import NeptuneBase
class TestNeptuneMemory(unittest.TestCase):
"""Test suite for the Neptune Memory implementation."""
def setUp(self):
"""Set up test fixtures before each test method."""
# Create a mock config
self.config = MagicMock()
self.config.graph_store.config.endpoint = "neptune-db://test-graph"
self.config.graph_store.config.base_label = True
self.config.graph_store.threshold = 0.7
self.config.llm.provider = "openai_structured"
self.config.graph_store.llm = None
self.config.graph_store.custom_prompt = None
self.config.vector_store.provider = "qdrant"
self.config.vector_store.config = MagicMock()
# Create mock for NeptuneGraph
self.mock_graph = MagicMock()
# Create mocks for static methods
self.mock_embedding_model = MagicMock()
self.mock_llm = MagicMock()
self.mock_vector_store = MagicMock()
# Patch the necessary components
self.neptune_graph_patcher = patch("mem0.graphs.neptune.neptunedb.NeptuneGraph")
self.mock_neptune_graph = self.neptune_graph_patcher.start()
self.mock_neptune_graph.return_value = self.mock_graph
# Patch the static methods
self.create_embedding_model_patcher = patch.object(NeptuneBase, "_create_embedding_model")
self.mock_create_embedding_model = self.create_embedding_model_patcher.start()
self.mock_create_embedding_model.return_value = self.mock_embedding_model
self.create_llm_patcher = patch.object(NeptuneBase, "_create_llm")
self.mock_create_llm = self.create_llm_patcher.start()
self.mock_create_llm.return_value = self.mock_llm
self.create_vector_store_patcher = patch.object(NeptuneBase, "_create_vector_store")
self.mock_create_vector_store = self.create_vector_store_patcher.start()
self.mock_create_vector_store.return_value = self.mock_vector_store
# Create the MemoryGraph instance
self.memory_graph = MemoryGraph(self.config)
# Set up common test data
self.user_id = "test_user"
self.test_filters = {"user_id": self.user_id}
def tearDown(self):
"""Tear down test fixtures after each test method."""
self.neptune_graph_patcher.stop()
self.create_embedding_model_patcher.stop()
self.create_llm_patcher.stop()
self.create_vector_store_patcher.stop()
def test_initialization(self):
"""Test that the MemoryGraph is initialized correctly."""
self.assertEqual(self.memory_graph.graph, self.mock_graph)
self.assertEqual(self.memory_graph.embedding_model, self.mock_embedding_model)
self.assertEqual(self.memory_graph.llm, self.mock_llm)
self.assertEqual(self.memory_graph.vector_store, self.mock_vector_store)
self.assertEqual(self.memory_graph.llm_provider, "openai_structured")
self.assertEqual(self.memory_graph.node_label, ":`__Entity__`")
self.assertEqual(self.memory_graph.threshold, 0.7)
self.assertEqual(self.memory_graph.vector_store_limit, 5)
def test_collection_name_variants(self):
"""Test all collection_name configuration variants."""
# Test 1: graph_store.config.collection_name is set
config1 = MagicMock()
config1.graph_store.config.endpoint = "neptune-db://test-graph"
config1.graph_store.config.base_label = True
config1.graph_store.config.collection_name = "custom_collection"
config1.llm.provider = "openai"
config1.graph_store.llm = None
config1.vector_store.provider = "qdrant"
config1.vector_store.config = MagicMock()
MemoryGraph(config1)
self.assertEqual(config1.vector_store.config.collection_name, "custom_collection")
# Test 2: vector_store.config.collection_name exists, graph_store.config.collection_name is None
config2 = MagicMock()
config2.graph_store.config.endpoint = "neptune-db://test-graph"
config2.graph_store.config.base_label = True
config2.graph_store.config.collection_name = None
config2.llm.provider = "openai"
config2.graph_store.llm = None
config2.vector_store.provider = "qdrant"
config2.vector_store.config = MagicMock()
config2.vector_store.config.collection_name = "existing_collection"
MemoryGraph(config2)
self.assertEqual(config2.vector_store.config.collection_name, "existing_collection_neptune_vector_store")
# Test 3: Neither collection_name is set (default case)
config3 = MagicMock()
config3.graph_store.config.endpoint = "neptune-db://test-graph"
config3.graph_store.config.base_label = True
config3.graph_store.config.collection_name = None
config3.llm.provider = "openai"
config3.graph_store.llm = None
config3.vector_store.provider = "qdrant"
config3.vector_store.config = MagicMock()
config3.vector_store.config.collection_name = None
MemoryGraph(config3)
self.assertEqual(config3.vector_store.config.collection_name, "mem0_neptune_vector_store")
def test_init(self):
"""Test the class init functions"""
# Create a mock config with bad endpoint
config_no_endpoint = MagicMock()
config_no_endpoint.graph_store.config.endpoint = None
# Create the MemoryGraph instance
with pytest.raises(ValueError):
MemoryGraph(config_no_endpoint)
# Create a mock config with wrong endpoint type
config_wrong_endpoint = MagicMock()
config_wrong_endpoint.graph_store.config.endpoint = "neptune-graph://test-graph"
with pytest.raises(ValueError):
MemoryGraph(config_wrong_endpoint)
def test_add_method(self):
"""Test the add method with mocked components."""
# Mock the necessary methods that add() calls
self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person", "bob": "person"})
self.memory_graph._establish_nodes_relations_from_data = MagicMock(
return_value=[{"source": "alice", "relationship": "knows", "destination": "bob"}]
)
self.memory_graph._search_graph_db = MagicMock(return_value=[])
self.memory_graph._get_delete_entities_from_search_output = MagicMock(return_value=[])
self.memory_graph._delete_entities = MagicMock(return_value=[])
self.memory_graph._add_entities = MagicMock(
return_value=[{"source": "alice", "relationship": "knows", "target": "bob"}]
)
# Call the add method
result = self.memory_graph.add("Alice knows Bob", self.test_filters)
# Verify the method calls
self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Alice knows Bob", self.test_filters)
self.memory_graph._establish_nodes_relations_from_data.assert_called_once()
self.memory_graph._search_graph_db.assert_called_once()
self.memory_graph._get_delete_entities_from_search_output.assert_called_once()
self.memory_graph._delete_entities.assert_called_once_with([], self.user_id)
self.memory_graph._add_entities.assert_called_once()
# Check the result structure
self.assertIn("deleted_entities", result)
self.assertIn("added_entities", result)
def test_search_method(self):
"""Test the search method with mocked components."""
# Mock the necessary methods that search() calls
self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person"})
# Mock search results
mock_search_results = [
{"source": "alice", "relationship": "knows", "destination": "bob"},
{"source": "alice", "relationship": "works_with", "destination": "charlie"},
]
self.memory_graph._search_graph_db = MagicMock(return_value=mock_search_results)
# Mock BM25Okapi
with patch("mem0.graphs.neptune.base.BM25Okapi") as mock_bm25:
mock_bm25_instance = MagicMock()
mock_bm25.return_value = mock_bm25_instance
# Mock get_top_n to return reranked results
reranked_results = [["alice", "knows", "bob"], ["alice", "works_with", "charlie"]]
mock_bm25_instance.get_top_n.return_value = reranked_results
# Call the search method
result = self.memory_graph.search("Find Alice", self.test_filters, limit=5)
# Verify the method calls
self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Find Alice", self.test_filters)
self.memory_graph._search_graph_db.assert_called_once_with(node_list=["alice"], filters=self.test_filters)
# Check the result structure
self.assertEqual(len(result), 2)
self.assertEqual(result[0]["source"], "alice")
self.assertEqual(result[0]["relationship"], "knows")
self.assertEqual(result[0]["destination"], "bob")
def test_get_all_method(self):
"""Test the get_all method."""
# Mock the _get_all_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"user_id": self.user_id, "limit": 10}
self.memory_graph._get_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [
{"source": "alice", "relationship": "knows", "target": "bob"},
{"source": "bob", "relationship": "works_with", "target": "charlie"},
]
self.mock_graph.query.return_value = mock_query_result
# Call the get_all method
result = self.memory_graph.get_all(self.test_filters, limit=10)
# Verify the method calls
self.memory_graph._get_all_cypher.assert_called_once_with(self.test_filters, 10)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result structure
self.assertEqual(len(result), 2)
self.assertEqual(result[0]["source"], "alice")
self.assertEqual(result[0]["relationship"], "knows")
self.assertEqual(result[0]["target"], "bob")
def test_delete_all_method(self):
"""Test the delete_all method."""
# Mock the _delete_all_cypher method
mock_cypher = "MATCH (n) DETACH DELETE n"
mock_params = {"user_id": self.user_id}
self.memory_graph._delete_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Call the delete_all method
self.memory_graph.delete_all(self.test_filters)
# Verify the method calls
self.memory_graph._delete_all_cypher.assert_called_once_with(self.test_filters)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
def test_search_source_node(self):
"""Test the _search_source_node method."""
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
# Mock the _search_source_node_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
self.memory_graph._search_source_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
self.mock_graph.query.return_value = mock_query_result
# Call the _search_source_node method
result = self.memory_graph._search_source_node(mock_embedding, self.user_id, threshold=0.9)
# Verify the method calls
self.memory_graph._search_source_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, mock_query_result)
def test_search_destination_node(self):
"""Test the _search_destination_node method."""
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
# Mock the _search_destination_node_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"destination_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
self.memory_graph._search_destination_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
self.mock_graph.query.return_value = mock_query_result
# Call the _search_destination_node method
result = self.memory_graph._search_destination_node(mock_embedding, self.user_id, threshold=0.9)
# Verify the method calls
self.memory_graph._search_destination_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, mock_query_result)
def test_search_graph_db(self):
"""Test the _search_graph_db method."""
# Mock node list
node_list = ["alice", "bob"]
# Mock embedding
mock_embedding = [0.1, 0.2, 0.3]
self.mock_embedding_model.embed.return_value = mock_embedding
# Mock the _search_graph_db_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"n_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.7, "limit": 10}
self.memory_graph._search_graph_db_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query results
mock_query_result1 = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
mock_query_result2 = [{"source": "bob", "relationship": "works_with", "destination": "charlie"}]
self.mock_graph.query.side_effect = [mock_query_result1, mock_query_result2]
# Call the _search_graph_db method
result = self.memory_graph._search_graph_db(node_list, self.test_filters, limit=10)
# Verify the method calls
self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
self.assertEqual(self.memory_graph._search_graph_db_cypher.call_count, 2)
self.assertEqual(self.mock_graph.query.call_count, 2)
# Check the result
expected_result = mock_query_result1 + mock_query_result2
self.assertEqual(result, expected_result)
def test_add_entities(self):
"""Test the _add_entities method."""
# Mock data
to_be_added = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
entity_type_map = {"alice": "person", "bob": "person"}
# Mock embeddings
mock_embedding = [0.1, 0.2, 0.3]
self.mock_embedding_model.embed.return_value = mock_embedding
# Mock search results
mock_source_search = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
mock_dest_search = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
# Mock the search methods
self.memory_graph._search_source_node = MagicMock(return_value=mock_source_search)
self.memory_graph._search_destination_node = MagicMock(return_value=mock_dest_search)
# Mock the _add_entities_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_id": 123, "destination_id": 456}
self.memory_graph._add_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
self.mock_graph.query.return_value = mock_query_result
# Call the _add_entities method
result = self.memory_graph._add_entities(to_be_added, self.user_id, entity_type_map)
# Verify the method calls
self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
self.memory_graph._search_source_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
self.memory_graph._search_destination_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
self.memory_graph._add_entities_cypher.assert_called_once()
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, [mock_query_result])
def test_delete_entities(self):
"""Test the _delete_entities method."""
# Mock data
to_be_deleted = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
# Mock the _delete_entities_cypher method
mock_cypher = "MATCH (n) RETURN n"
mock_params = {"source_name": "alice", "dest_name": "bob", "user_id": self.user_id}
self.memory_graph._delete_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))
# Mock the graph.query result
mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
self.mock_graph.query.return_value = mock_query_result
# Call the _delete_entities method
result = self.memory_graph._delete_entities(to_be_deleted, self.user_id)
# Verify the method calls
self.memory_graph._delete_entities_cypher.assert_called_once_with("alice", "bob", "knows", self.user_id)
self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)
# Check the result
self.assertEqual(result, [mock_query_result])
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_main.py | tests/memory/test_main.py | import logging
from unittest.mock import MagicMock
import pytest
from mem0.memory.main import AsyncMemory, Memory
def _setup_mocks(mocker):
"""Helper to setup common mocks for both sync and async fixtures"""
mock_embedder = mocker.MagicMock()
mock_embedder.return_value.embed.return_value = [0.1, 0.2, 0.3]
mocker.patch("mem0.utils.factory.EmbedderFactory.create", mock_embedder)
mock_vector_store = mocker.MagicMock()
mock_vector_store.return_value.search.return_value = []
mocker.patch(
"mem0.utils.factory.VectorStoreFactory.create", side_effect=[mock_vector_store.return_value, mocker.MagicMock()]
)
mock_llm = mocker.MagicMock()
mocker.patch("mem0.utils.factory.LlmFactory.create", mock_llm)
mocker.patch("mem0.memory.storage.SQLiteManager", mocker.MagicMock())
return mock_llm, mock_vector_store
class TestAddToVectorStoreErrors:
    """Error-path tests for Memory._add_to_vector_store (sync API)."""

    @pytest.fixture
    def mock_memory(self, mocker):
        """Fixture that returns a Memory instance with mocker-based mocks"""
        mock_llm, _ = _setup_mocks(mocker)

        memory = Memory()
        memory.config = mocker.MagicMock()
        # No custom prompts: exercise the default prompt code paths.
        memory.config.custom_fact_extraction_prompt = None
        memory.config.custom_update_memory_prompt = None
        memory.api_version = "v1.1"
        return memory

    def test_empty_llm_response_fact_extraction(self, mocker, mock_memory, caplog):
        """Test empty response from LLM during fact extraction"""
        # Setup
        mock_memory.llm.generate_response.return_value = "invalid json"  # This will trigger a JSON decode error
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        # Execute
        with caplog.at_level(logging.ERROR):
            result = mock_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
            )

        # Verify
        assert mock_memory.llm.generate_response.call_count == 1
        assert result == []  # Should return empty list when no memories processed
        # Check for error message in any of the log records
        assert any("Error in new_retrieved_facts" in record.msg for record in caplog.records), "Expected error message not found in logs"
        assert mock_capture_event.call_count == 1

    def test_empty_llm_response_memory_actions(self, mock_memory, caplog):
        """Test empty response from LLM during memory actions"""
        # Setup
        # First call returns valid JSON, second call returns empty string
        mock_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]

        # Execute
        with caplog.at_level(logging.WARNING):
            result = mock_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
            )

        # Verify
        assert mock_memory.llm.generate_response.call_count == 2
        assert result == []  # Should return empty list when no memories processed
        assert "Empty response from LLM, no memories to extract" in caplog.text
@pytest.mark.asyncio
class TestAsyncAddToVectorStoreErrors:
    """Error-path tests for AsyncMemory._add_to_vector_store (async API)."""

    @pytest.fixture
    def mock_async_memory(self, mocker):
        """Fixture for AsyncMemory with mocker-based mocks"""
        mock_llm, _ = _setup_mocks(mocker)

        memory = AsyncMemory()
        memory.config = mocker.MagicMock()
        # No custom prompts: exercise the default prompt code paths.
        memory.config.custom_fact_extraction_prompt = None
        memory.config.custom_update_memory_prompt = None
        memory.api_version = "v1.1"
        return memory

    @pytest.mark.asyncio
    async def test_async_empty_llm_response_fact_extraction(self, mock_async_memory, caplog, mocker):
        """Test empty response in AsyncMemory._add_to_vector_store"""
        mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
        mock_async_memory.llm.generate_response.return_value = "invalid json"  # This will trigger a JSON decode error
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        with caplog.at_level(logging.ERROR):
            result = await mock_async_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
            )

        assert mock_async_memory.llm.generate_response.call_count == 1
        assert result == []
        # Check for error message in any of the log records
        assert any("Error in new_retrieved_facts" in record.msg for record in caplog.records), "Expected error message not found in logs"
        assert mock_capture_event.call_count == 1

    @pytest.mark.asyncio
    async def test_async_empty_llm_response_memory_actions(self, mock_async_memory, caplog, mocker):
        """Test empty response in AsyncMemory._add_to_vector_store"""
        mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
        mock_async_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]
        mock_capture_event = mocker.MagicMock()
        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)

        with caplog.at_level(logging.WARNING):
            result = await mock_async_memory._add_to_vector_store(
                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
            )

        assert result == []
        assert "Empty response from LLM, no memories to extract" in caplog.text
        assert mock_capture_event.call_count == 1
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_neptune_analytics_memory.py | tests/memory/test_neptune_analytics_memory.py | import unittest
from unittest.mock import MagicMock, patch
import pytest
from mem0.graphs.neptune.neptunegraph import MemoryGraph
from mem0.graphs.neptune.base import NeptuneBase
class TestNeptuneMemory(unittest.TestCase):
    """Test suite for the Neptune Memory implementation.

    All external collaborators (NeptuneAnalyticsGraph, embedder, LLM) are
    patched in setUp and unpatched in tearDown; each test then stubs the
    MemoryGraph private methods it exercises.
    """

    def setUp(self):
        """Set up test fixtures before each test method."""
        # Create a mock config
        self.config = MagicMock()
        self.config.graph_store.config.endpoint = "neptune-graph://test-graph"
        self.config.graph_store.config.base_label = True
        self.config.graph_store.threshold = 0.7
        self.config.llm.provider = "openai_structured"
        self.config.graph_store.llm = None
        self.config.graph_store.custom_prompt = None

        # Create mock for NeptuneAnalyticsGraph
        self.mock_graph = MagicMock()
        self.mock_graph.client.get_graph.return_value = {"status": "AVAILABLE"}

        # Create mocks for static methods
        self.mock_embedding_model = MagicMock()
        self.mock_llm = MagicMock()

        # Patch the necessary components
        self.neptune_analytics_graph_patcher = patch("mem0.graphs.neptune.neptunegraph.NeptuneAnalyticsGraph")
        self.mock_neptune_analytics_graph = self.neptune_analytics_graph_patcher.start()
        self.mock_neptune_analytics_graph.return_value = self.mock_graph

        # Patch the static methods
        self.create_embedding_model_patcher = patch.object(NeptuneBase, "_create_embedding_model")
        self.mock_create_embedding_model = self.create_embedding_model_patcher.start()
        self.mock_create_embedding_model.return_value = self.mock_embedding_model

        self.create_llm_patcher = patch.object(NeptuneBase, "_create_llm")
        self.mock_create_llm = self.create_llm_patcher.start()
        self.mock_create_llm.return_value = self.mock_llm

        # Create the MemoryGraph instance
        self.memory_graph = MemoryGraph(self.config)

        # Set up common test data
        self.user_id = "test_user"
        self.test_filters = {"user_id": self.user_id}

    def tearDown(self):
        """Tear down test fixtures after each test method."""
        self.neptune_analytics_graph_patcher.stop()
        self.create_embedding_model_patcher.stop()
        self.create_llm_patcher.stop()

    def test_initialization(self):
        """Test that the MemoryGraph is initialized correctly."""
        self.assertEqual(self.memory_graph.graph, self.mock_graph)
        self.assertEqual(self.memory_graph.embedding_model, self.mock_embedding_model)
        self.assertEqual(self.memory_graph.llm, self.mock_llm)
        self.assertEqual(self.memory_graph.llm_provider, "openai_structured")
        self.assertEqual(self.memory_graph.node_label, ":`__Entity__`")
        self.assertEqual(self.memory_graph.threshold, 0.7)

    def test_init(self):
        """Test the class init functions"""
        # Create a mock config with no endpoint configured
        config_no_endpoint = MagicMock()
        config_no_endpoint.graph_store.config.endpoint = None

        # Create the MemoryGraph instance
        with pytest.raises(ValueError):
            MemoryGraph(config_no_endpoint)

        # Create a mock config with an unsupported neptune-db endpoint
        config_ndb_endpoint = MagicMock()
        config_ndb_endpoint.graph_store.config.endpoint = "neptune-db://test-graph"

        with pytest.raises(ValueError):
            MemoryGraph(config_ndb_endpoint)

    def test_add_method(self):
        """Test the add method with mocked components."""
        # Mock the necessary methods that add() calls
        self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person", "bob": "person"})
        self.memory_graph._establish_nodes_relations_from_data = MagicMock(
            return_value=[{"source": "alice", "relationship": "knows", "destination": "bob"}]
        )
        self.memory_graph._search_graph_db = MagicMock(return_value=[])
        self.memory_graph._get_delete_entities_from_search_output = MagicMock(return_value=[])
        self.memory_graph._delete_entities = MagicMock(return_value=[])
        self.memory_graph._add_entities = MagicMock(
            return_value=[{"source": "alice", "relationship": "knows", "target": "bob"}]
        )

        # Call the add method
        result = self.memory_graph.add("Alice knows Bob", self.test_filters)

        # Verify the method calls
        self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Alice knows Bob", self.test_filters)
        self.memory_graph._establish_nodes_relations_from_data.assert_called_once()
        self.memory_graph._search_graph_db.assert_called_once()
        self.memory_graph._get_delete_entities_from_search_output.assert_called_once()
        self.memory_graph._delete_entities.assert_called_once_with([], self.user_id)
        self.memory_graph._add_entities.assert_called_once()

        # Check the result structure
        self.assertIn("deleted_entities", result)
        self.assertIn("added_entities", result)

    def test_search_method(self):
        """Test the search method with mocked components."""
        # Mock the necessary methods that search() calls
        self.memory_graph._retrieve_nodes_from_data = MagicMock(return_value={"alice": "person"})

        # Mock search results
        mock_search_results = [
            {"source": "alice", "relationship": "knows", "destination": "bob"},
            {"source": "alice", "relationship": "works_with", "destination": "charlie"},
        ]
        self.memory_graph._search_graph_db = MagicMock(return_value=mock_search_results)

        # Mock BM25Okapi (used by search() to rerank the raw graph hits)
        with patch("mem0.graphs.neptune.base.BM25Okapi") as mock_bm25:
            mock_bm25_instance = MagicMock()
            mock_bm25.return_value = mock_bm25_instance

            # Mock get_top_n to return reranked results
            reranked_results = [["alice", "knows", "bob"], ["alice", "works_with", "charlie"]]
            mock_bm25_instance.get_top_n.return_value = reranked_results

            # Call the search method
            result = self.memory_graph.search("Find Alice", self.test_filters, limit=5)

            # Verify the method calls
            self.memory_graph._retrieve_nodes_from_data.assert_called_once_with("Find Alice", self.test_filters)
            self.memory_graph._search_graph_db.assert_called_once_with(node_list=["alice"], filters=self.test_filters)

            # Check the result structure
            self.assertEqual(len(result), 2)
            self.assertEqual(result[0]["source"], "alice")
            self.assertEqual(result[0]["relationship"], "knows")
            self.assertEqual(result[0]["destination"], "bob")

    def test_get_all_method(self):
        """Test the get_all method."""
        # Mock the _get_all_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"user_id": self.user_id, "limit": 10}
        self.memory_graph._get_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query result
        mock_query_result = [
            {"source": "alice", "relationship": "knows", "target": "bob"},
            {"source": "bob", "relationship": "works_with", "target": "charlie"},
        ]
        self.mock_graph.query.return_value = mock_query_result

        # Call the get_all method
        result = self.memory_graph.get_all(self.test_filters, limit=10)

        # Verify the method calls
        self.memory_graph._get_all_cypher.assert_called_once_with(self.test_filters, 10)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

        # Check the result structure
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]["source"], "alice")
        self.assertEqual(result[0]["relationship"], "knows")
        self.assertEqual(result[0]["target"], "bob")

    def test_delete_all_method(self):
        """Test the delete_all method."""
        # Mock the _delete_all_cypher method
        mock_cypher = "MATCH (n) DETACH DELETE n"
        mock_params = {"user_id": self.user_id}
        self.memory_graph._delete_all_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Call the delete_all method
        self.memory_graph.delete_all(self.test_filters)

        # Verify the method calls
        self.memory_graph._delete_all_cypher.assert_called_once_with(self.test_filters)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

    def test_search_source_node(self):
        """Test the _search_source_node method."""
        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]

        # Mock the _search_source_node_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
        self.memory_graph._search_source_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query result
        mock_query_result = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
        self.mock_graph.query.return_value = mock_query_result

        # Call the _search_source_node method
        result = self.memory_graph._search_source_node(mock_embedding, self.user_id, threshold=0.9)

        # Verify the method calls
        self.memory_graph._search_source_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

        # Check the result
        self.assertEqual(result, mock_query_result)

    def test_search_destination_node(self):
        """Test the _search_destination_node method."""
        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]

        # Mock the _search_destination_node_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"destination_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.9}
        self.memory_graph._search_destination_node_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query result
        mock_query_result = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]
        self.mock_graph.query.return_value = mock_query_result

        # Call the _search_destination_node method
        result = self.memory_graph._search_destination_node(mock_embedding, self.user_id, threshold=0.9)

        # Verify the method calls
        self.memory_graph._search_destination_node_cypher.assert_called_once_with(mock_embedding, self.user_id, 0.9)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

        # Check the result
        self.assertEqual(result, mock_query_result)

    def test_search_graph_db(self):
        """Test the _search_graph_db method."""
        # Mock node list
        node_list = ["alice", "bob"]

        # Mock embedding
        mock_embedding = [0.1, 0.2, 0.3]
        self.mock_embedding_model.embed.return_value = mock_embedding

        # Mock the _search_graph_db_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"n_embedding": mock_embedding, "user_id": self.user_id, "threshold": 0.7, "limit": 10}
        self.memory_graph._search_graph_db_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query results (one result list per node in node_list)
        mock_query_result1 = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
        mock_query_result2 = [{"source": "bob", "relationship": "works_with", "destination": "charlie"}]
        self.mock_graph.query.side_effect = [mock_query_result1, mock_query_result2]

        # Call the _search_graph_db method
        result = self.memory_graph._search_graph_db(node_list, self.test_filters, limit=10)

        # Verify the method calls
        self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
        self.assertEqual(self.memory_graph._search_graph_db_cypher.call_count, 2)
        self.assertEqual(self.mock_graph.query.call_count, 2)

        # Check the result
        expected_result = mock_query_result1 + mock_query_result2
        self.assertEqual(result, expected_result)

    def test_add_entities(self):
        """Test the _add_entities method."""
        # Mock data
        to_be_added = [{"source": "alice", "relationship": "knows", "destination": "bob"}]
        entity_type_map = {"alice": "person", "bob": "person"}

        # Mock embeddings
        mock_embedding = [0.1, 0.2, 0.3]
        self.mock_embedding_model.embed.return_value = mock_embedding

        # Mock search results
        mock_source_search = [{"id(source_candidate)": 123, "cosine_similarity": 0.95}]
        mock_dest_search = [{"id(destination_candidate)": 456, "cosine_similarity": 0.92}]

        # Mock the search methods
        self.memory_graph._search_source_node = MagicMock(return_value=mock_source_search)
        self.memory_graph._search_destination_node = MagicMock(return_value=mock_dest_search)

        # Mock the _add_entities_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_id": 123, "destination_id": 456}
        self.memory_graph._add_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query result
        mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
        self.mock_graph.query.return_value = mock_query_result

        # Call the _add_entities method
        result = self.memory_graph._add_entities(to_be_added, self.user_id, entity_type_map)

        # Verify the method calls
        self.assertEqual(self.mock_embedding_model.embed.call_count, 2)
        self.memory_graph._search_source_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
        self.memory_graph._search_destination_node.assert_called_once_with(mock_embedding, self.user_id, threshold=0.7)
        self.memory_graph._add_entities_cypher.assert_called_once()
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

        # Check the result
        self.assertEqual(result, [mock_query_result])

    def test_delete_entities(self):
        """Test the _delete_entities method."""
        # Mock data
        to_be_deleted = [{"source": "alice", "relationship": "knows", "destination": "bob"}]

        # Mock the _delete_entities_cypher method
        mock_cypher = "MATCH (n) RETURN n"
        mock_params = {"source_name": "alice", "dest_name": "bob", "user_id": self.user_id}
        self.memory_graph._delete_entities_cypher = MagicMock(return_value=(mock_cypher, mock_params))

        # Mock the graph.query result
        mock_query_result = [{"source": "alice", "relationship": "knows", "target": "bob"}]
        self.mock_graph.query.return_value = mock_query_result

        # Call the _delete_entities method
        result = self.memory_graph._delete_entities(to_be_deleted, self.user_id)

        # Verify the method calls
        self.memory_graph._delete_entities_cypher.assert_called_once_with("alice", "bob", "knows", self.user_id)
        self.mock_graph.query.assert_called_once_with(mock_cypher, params=mock_params)

        # Check the result
        self.assertEqual(result, [mock_query_result])
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_neo4j_cypher_syntax.py | tests/memory/test_neo4j_cypher_syntax.py | import os
from unittest.mock import Mock, patch
class TestNeo4jCypherSyntaxFix:
"""Test that Neo4j Cypher syntax fixes work correctly"""
def test_get_all_generates_valid_cypher_with_agent_id(self):
"""Test that get_all method generates valid Cypher with agent_id"""
# Mock the langchain_neo4j module to avoid import issues
with patch.dict('sys.modules', {'langchain_neo4j': Mock()}):
from mem0.memory.graph_memory import MemoryGraph
# Create instance (will fail on actual connection, but that's fine for syntax testing)
try:
_ = MemoryGraph(url="bolt://localhost:7687", username="test", password="test")
except Exception:
# Expected to fail on connection, just test the class exists
assert MemoryGraph is not None
return
def test_cypher_syntax_validation(self):
"""Test that our Cypher fixes don't contain problematic patterns"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Ensure the old buggy pattern is not present
assert "AND n.agent_id = $agent_id AND m.agent_id = $agent_id" not in content
assert "WHERE 1=1 {agent_filter}" not in content
# Ensure proper node property syntax is present
assert "node_props" in content
assert "agent_id: $agent_id" in content
# Ensure run_id follows the same pattern
# Check for absence of problematic run_id patterns
assert "AND n.run_id = $run_id AND m.run_id = $run_id" not in content
assert "WHERE 1=1 {run_id_filter}" not in content
def test_no_undefined_variables_in_cypher(self):
"""Test that we don't have undefined variable patterns"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check for patterns that would cause "Variable 'm' not defined" errors
lines = content.split('\n')
for i, line in enumerate(lines):
# Look for WHERE clauses that reference variables not in MATCH
if 'WHERE' in line and 'm.agent_id' in line:
# Check if there's a MATCH clause before this that defines 'm'
preceding_lines = lines[max(0, i-10):i]
match_found = any('MATCH' in prev_line and ' m ' in prev_line for prev_line in preceding_lines)
assert match_found, f"Line {i+1}: WHERE clause references 'm' without MATCH definition"
# Also check for run_id patterns that might have similar issues
if 'WHERE' in line and 'm.run_id' in line:
# Check if there's a MATCH clause before this that defines 'm'
preceding_lines = lines[max(0, i-10):i]
match_found = any('MATCH' in prev_line and ' m ' in prev_line for prev_line in preceding_lines)
assert match_found, f"Line {i+1}: WHERE clause references 'm.run_id' without MATCH definition"
def test_agent_id_integration_syntax(self):
"""Test that agent_id is properly integrated into MATCH clauses"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Should have node property building logic
assert 'node_props = [' in content
assert 'node_props.append("agent_id: $agent_id")' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Should use the node properties in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_run_id_integration_syntax(self):
"""Test that run_id is properly integrated into MATCH clauses"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Should have node property building logic for run_id
assert 'node_props = [' in content
assert 'node_props.append("run_id: $run_id")' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Should use the node properties in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_agent_id_filter_patterns(self):
"""Test that agent_id filtering follows the correct pattern"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that agent_id is handled in filters
assert 'if filters.get("agent_id"):' in content
assert 'params["agent_id"] = filters["agent_id"]' in content
# Check that agent_id is used in node properties
assert 'node_props.append("agent_id: $agent_id")' in content
def test_run_id_filter_patterns(self):
"""Test that run_id filtering follows the same pattern as agent_id"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that run_id is handled in filters
assert 'if filters.get("run_id"):' in content
assert 'params["run_id"] = filters["run_id"]' in content
# Check that run_id is used in node properties
assert 'node_props.append("run_id: $run_id")' in content
def test_agent_id_cypher_generation(self):
"""Test that agent_id is properly included in Cypher query generation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that the dynamic property building pattern exists
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Check that agent_id is handled in the pattern
assert 'if filters.get(' in content
assert 'node_props.append(' in content
# Verify the pattern is used in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_run_id_cypher_generation(self):
"""Test that run_id is properly included in Cypher query generation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that the dynamic property building pattern exists
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
# Check that run_id is handled in the pattern
assert 'if filters.get(' in content
assert 'node_props.append(' in content
# Verify the pattern is used in MATCH clauses
assert '{{{node_props_str}}}' in content or '{node_props_str}' in content
def test_agent_id_implementation_pattern(self):
"""Test that the code structure supports agent_id implementation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Verify that agent_id pattern is used consistently
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
assert 'if filters.get("agent_id"):' in content
assert 'node_props.append("agent_id: $agent_id")' in content
def test_run_id_implementation_pattern(self):
"""Test that the code structure supports run_id implementation"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Verify that run_id pattern is used consistently
assert 'node_props = [' in content
assert 'node_props_str = ", ".join(node_props)' in content
assert 'if filters.get("run_id"):' in content
assert 'node_props.append("run_id: $run_id")' in content
def test_user_identity_integration(self):
"""Test that both agent_id and run_id are properly integrated into user identity"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that user_identity building includes both agent_id and run_id
assert 'user_identity = f"user_id: {filters[\'user_id\']}"' in content
assert 'user_identity += f", agent_id: {filters[\'agent_id\']}"' in content
assert 'user_identity += f", run_id: {filters[\'run_id\']}"' in content
def test_search_methods_integration(self):
"""Test that both agent_id and run_id are properly integrated into search methods"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that search methods handle both agent_id and run_id
assert 'where_conditions.append("source_candidate.agent_id = $agent_id")' in content
assert 'where_conditions.append("source_candidate.run_id = $run_id")' in content
assert 'where_conditions.append("destination_candidate.agent_id = $agent_id")' in content
assert 'where_conditions.append("destination_candidate.run_id = $run_id")' in content
def test_add_entities_integration(self):
"""Test that both agent_id and run_id are properly integrated into add_entities"""
graph_memory_path = 'mem0/memory/graph_memory.py'
# Check if file exists before reading
if not os.path.exists(graph_memory_path):
# Skip test if file doesn't exist (e.g., in CI environment)
return
with open(graph_memory_path, 'r') as f:
content = f.read()
# Check that add_entities handles both agent_id and run_id
assert 'agent_id = filters.get("agent_id", None)' in content
assert 'run_id = filters.get("run_id", None)' in content
# Check that merge properties include both
assert 'if agent_id:' in content
assert 'if run_id:' in content
assert 'merge_props.append("agent_id: $agent_id")' in content
assert 'merge_props.append("run_id: $run_id")' in content
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_kuzu.py | tests/memory/test_kuzu.py | import numpy as np
import pytest
from unittest.mock import Mock, patch
from mem0.memory.kuzu_memory import MemoryGraph
class TestKuzu:
    """Test that Kuzu memory works correctly"""

    # Create distinct embeddings that won't match with threshold=0.7
    # Each embedding is mostly zeros with ones in different positions to ensure low similarity
    alice_emb = np.zeros(384)
    alice_emb[0:96] = 1.0
    bob_emb = np.zeros(384)
    bob_emb[96:192] = 1.0
    charlie_emb = np.zeros(384)
    charlie_emb[192:288] = 1.0
    dave_emb = np.zeros(384)
    dave_emb[288:384] = 1.0

    # Name -> embedding lookup used by the mock_embedding_model fixture.
    embeddings = {
        "alice": alice_emb.tolist(),
        "bob": bob_emb.tolist(),
        "charlie": charlie_emb.tolist(),
        "dave": dave_emb.tolist(),
    }
    @pytest.fixture
    def mock_config(self):
        """Create a mock configuration for testing"""
        config = Mock()

        # Mock embedder config
        config.embedder.provider = "mock_embedder"
        config.embedder.config = {"model": "mock_model"}
        config.vector_store.config = {"dimensions": 384}

        # Mock graph store config (in-memory Kuzu database)
        config.graph_store.config.db = ":memory:"
        config.graph_store.threshold = 0.7

        # Mock LLM config
        config.llm.provider = "mock_llm"
        config.llm.config = {"api_key": "test_key"}

        return config
    @pytest.fixture
    def mock_embedding_model(self):
        """Create a mock embedding model"""
        mock_model = Mock()
        mock_model.config.embedding_dims = 384

        def mock_embed(text):
            # Look up the canned per-name embedding defined on the class.
            return self.embeddings[text]

        mock_model.embed.side_effect = mock_embed
        return mock_model
    @pytest.fixture
    def mock_llm(self):
        """Create a mock LLM"""
        mock_llm = Mock()
        # Shape mirrors the tool-call response expected from entity extraction.
        mock_llm.generate_response.return_value = {
            "tool_calls": [
                {
                    "name": "extract_entities",
                    "arguments": {"entities": [{"entity": "test_entity", "entity_type": "test_type"}]},
                }
            ]
        }
        return mock_llm
    @patch("mem0.memory.kuzu_memory.EmbedderFactory")
    @patch("mem0.memory.kuzu_memory.LlmFactory")
    def test_kuzu_memory_initialization(
        self, mock_llm_factory, mock_embedder_factory, mock_config, mock_embedding_model, mock_llm
    ):
        """Test that Kuzu memory initializes correctly"""
        # Setup mocks
        mock_embedder_factory.create.return_value = mock_embedding_model
        mock_llm_factory.create.return_value = mock_llm

        # Create instance
        kuzu_memory = MemoryGraph(mock_config)

        # Verify initialization
        assert kuzu_memory.config == mock_config
        assert kuzu_memory.embedding_model == mock_embedding_model
        assert kuzu_memory.embedding_dims == 384
        assert kuzu_memory.llm == mock_llm
        assert kuzu_memory.threshold == 0.7
    @pytest.mark.parametrize(
        "embedding_dims",
        [None, 0, -1],
    )
    @patch("mem0.memory.kuzu_memory.EmbedderFactory")
    def test_kuzu_memory_initialization_invalid_embedding_dims(
        self, mock_embedder_factory, embedding_dims, mock_config
    ):
        """Test that Kuzu memory raises ValueError when initialized with invalid embedding_dims"""
        # Setup mocks
        mock_embedding_model = Mock()
        mock_embedding_model.config.embedding_dims = embedding_dims
        mock_embedder_factory.create.return_value = mock_embedding_model

        with pytest.raises(ValueError, match="must be a positive"):
            MemoryGraph(mock_config)
@patch("mem0.memory.kuzu_memory.EmbedderFactory")
@patch("mem0.memory.kuzu_memory.LlmFactory")
def test_kuzu(self, mock_llm_factory, mock_embedder_factory, mock_config, mock_embedding_model, mock_llm):
    """Test adding memory to the graph"""
    # Patch decorators apply bottom-up: LlmFactory -> mock_llm_factory,
    # EmbedderFactory -> mock_embedder_factory.
    mock_embedder_factory.create.return_value = mock_embedding_model
    mock_llm_factory.create.return_value = mock_llm
    kuzu_memory = MemoryGraph(mock_config)
    filters = {"user_id": "test_user", "agent_id": "test_agent", "run_id": "test_run"}
    # data1 forms a 3-node "knows" cycle; data2 adds a parallel "likes"
    # edge between two already-existing nodes.
    data1 = [
        {"source": "alice", "destination": "bob", "relationship": "knows"},
        {"source": "bob", "destination": "charlie", "relationship": "knows"},
        {"source": "charlie", "destination": "alice", "relationship": "knows"},
    ]
    data2 = [
        {"source": "charlie", "destination": "alice", "relationship": "likes"},
    ]
    # Phase 1: adding the cycle yields 3 nodes and 3 edges.
    result = kuzu_memory._add_entities(data1, filters, {})
    assert result[0] == [{"source": "alice", "relationship": "knows", "target": "bob"}]
    assert result[1] == [{"source": "bob", "relationship": "knows", "target": "charlie"}]
    assert result[2] == [{"source": "charlie", "relationship": "knows", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 3
    assert get_edge_count(kuzu_memory) == 3
    # Phase 2: parallel edge between existing nodes -- node count unchanged.
    result = kuzu_memory._add_entities(data2, filters, {})
    assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 3
    assert get_edge_count(kuzu_memory) == 4
    # Phase 3: edge whose source node ("dave") does not exist yet.
    data3 = [
        {"source": "dave", "destination": "alice", "relationship": "admires"}
    ]
    result = kuzu_memory._add_entities(data3, filters, {})
    assert result[0] == [{"source": "dave", "relationship": "admires", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 4  # dave is new
    assert get_edge_count(kuzu_memory) == 5
    # get_all should list every stored triple (order-independent check).
    results = kuzu_memory.get_all(filters)
    assert set([f"{result['source']}_{result['relationship']}_{result['target']}" for result in results]) == set([
        "alice_knows_bob",
        "bob_knows_charlie",
        "charlie_likes_alice",
        "charlie_knows_alice",
        "dave_admires_alice"
    ])
    # Searching around "bob" should surface only bob's incident edges.
    results = kuzu_memory._search_graph_db(["bob"], filters, threshold=0.8)
    assert set([f"{result['source']}_{result['relationship']}_{result['destination']}" for result in results]) == set([
        "alice_knows_bob",
        "bob_knows_charlie",
    ])
    # Deleting edges removes the relationships but keeps the nodes.
    result = kuzu_memory._delete_entities(data2, filters)
    assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 4
    assert get_edge_count(kuzu_memory) == 4
    result = kuzu_memory._delete_entities(data1, filters)
    assert result[0] == [{"source": "alice", "relationship": "knows", "target": "bob"}]
    assert result[1] == [{"source": "bob", "relationship": "knows", "target": "charlie"}]
    assert result[2] == [{"source": "charlie", "relationship": "knows", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 4
    assert get_edge_count(kuzu_memory) == 1
    # delete_all wipes both nodes and edges for the filter scope.
    result = kuzu_memory.delete_all(filters)
    assert get_node_count(kuzu_memory) == 0
    assert get_edge_count(kuzu_memory) == 0
    # The store remains usable after delete_all ...
    result = kuzu_memory._add_entities(data2, filters, {})
    assert result[0] == [{"source": "charlie", "relationship": "likes", "target": "alice"}]
    assert get_node_count(kuzu_memory) == 2
    assert get_edge_count(kuzu_memory) == 1
    # ... and reset() clears everything once more.
    result = kuzu_memory.reset()
    assert get_node_count(kuzu_memory) == 0
    assert get_edge_count(kuzu_memory) == 0
def get_node_count(kuzu_memory):
    """Return the total number of nodes currently stored in the Kuzu graph."""
    rows = kuzu_memory.kuzu_execute(
        """
        MATCH (n)
        RETURN COUNT(n) as count
        """
    )
    # kuzu_execute returns a list of row dicts; the single row holds the count.
    return int(rows[0]["count"])
def get_edge_count(kuzu_memory):
    """Return the total number of directed edges currently stored in the Kuzu graph."""
    rows = kuzu_memory.kuzu_execute(
        """
        MATCH (n)-[e]->(m)
        RETURN COUNT(e) as count
        """
    )
    # kuzu_execute returns a list of row dicts; the single row holds the count.
    return int(rows[0]["count"])
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/memory/test_storage.py | tests/memory/test_storage.py | import os
import sqlite3
import tempfile
import uuid
from datetime import datetime
import pytest
from mem0.memory.storage import SQLiteManager
class TestSQLiteManager:
    """Comprehensive test cases for SQLiteManager class."""

    @pytest.fixture
    def temp_db_path(self):
        """Create temporary database file."""
        temp_db = tempfile.NamedTemporaryFile(delete=False, suffix=".db")
        # Close the OS handle right away so SQLite can reopen the path itself.
        temp_db.close()
        yield temp_db.name
        # Teardown: remove the file if the test (or manager) left it behind.
        if os.path.exists(temp_db.name):
            os.unlink(temp_db.name)

    @pytest.fixture
    def sqlite_manager(self, temp_db_path):
        """Create SQLiteManager instance with temporary database."""
        manager = SQLiteManager(temp_db_path)
        yield manager
        if manager.connection:
            manager.close()

    @pytest.fixture
    def memory_manager(self):
        """Create in-memory SQLiteManager instance."""
        manager = SQLiteManager(":memory:")
        yield manager
        if manager.connection:
            manager.close()

    @pytest.fixture
    def sample_data(self):
        """Sample test data."""
        now = datetime.now().isoformat()
        return {
            "memory_id": str(uuid.uuid4()),
            "old_memory": "Old memory content",
            "new_memory": "New memory content",
            "event": "ADD",
            "created_at": now,
            "updated_at": now,
            "actor_id": "test_actor",
            "role": "user",
        }

    # ========== Initialization Tests ==========

    @pytest.mark.parametrize("db_type,path", [("file", "temp_db_path"), ("memory", ":memory:")])
    def test_initialization(self, db_type, path, request):
        """Test SQLiteManager initialization with different database types."""
        if db_type == "file":
            # "temp_db_path" is a fixture name; resolve it lazily via request.
            db_path = request.getfixturevalue(path)
        else:
            db_path = path
        manager = SQLiteManager(db_path)
        assert manager.connection is not None
        assert manager.db_path == db_path
        manager.close()

    def test_table_schema_creation(self, sqlite_manager):
        """Test that history table is created with correct schema."""
        cursor = sqlite_manager.connection.cursor()
        cursor.execute("PRAGMA table_info(history)")
        # table_info rows are (cid, name, type, notnull, dflt_value, pk);
        # collect just the column names.
        columns = {row[1] for row in cursor.fetchall()}
        expected_columns = {
            "id",
            "memory_id",
            "old_memory",
            "new_memory",
            "event",
            "created_at",
            "updated_at",
            "is_deleted",
            "actor_id",
            "role",
        }
        assert columns == expected_columns

    # ========== Add History Tests ==========

    def test_add_history_basic(self, sqlite_manager, sample_data):
        """Test basic add_history functionality."""
        sqlite_manager.add_history(
            memory_id=sample_data["memory_id"],
            old_memory=sample_data["old_memory"],
            new_memory=sample_data["new_memory"],
            event=sample_data["event"],
            created_at=sample_data["created_at"],
            actor_id=sample_data["actor_id"],
            role=sample_data["role"],
        )
        cursor = sqlite_manager.connection.cursor()
        cursor.execute("SELECT * FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
        result = cursor.fetchone()
        assert result is not None
        # Positional columns: (id, memory_id, old_memory, new_memory, event,
        # created_at, updated_at, is_deleted, actor_id, role)
        assert result[1] == sample_data["memory_id"]
        assert result[2] == sample_data["old_memory"]
        assert result[3] == sample_data["new_memory"]
        assert result[4] == sample_data["event"]
        assert result[8] == sample_data["actor_id"]
        assert result[9] == sample_data["role"]

    @pytest.mark.parametrize(
        "old_memory,new_memory,is_deleted", [(None, "New memory", 0), ("Old memory", None, 1), (None, None, 1)]
    )
    def test_add_history_optional_params(self, sqlite_manager, sample_data, old_memory, new_memory, is_deleted):
        """Test add_history with various optional parameter combinations."""
        sqlite_manager.add_history(
            memory_id=sample_data["memory_id"],
            old_memory=old_memory,
            new_memory=new_memory,
            event="UPDATE",
            updated_at=sample_data["updated_at"],
            is_deleted=is_deleted,
            actor_id=sample_data["actor_id"],
            role=sample_data["role"],
        )
        cursor = sqlite_manager.connection.cursor()
        cursor.execute("SELECT * FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
        result = cursor.fetchone()
        # None values must round-trip as SQL NULLs, not empty strings.
        assert result[2] == old_memory
        assert result[3] == new_memory
        assert result[6] == sample_data["updated_at"]
        assert result[7] == is_deleted

    def test_add_history_generates_unique_ids(self, sqlite_manager, sample_data):
        """Test that add_history generates unique IDs for each record."""
        for i in range(3):
            sqlite_manager.add_history(
                memory_id=sample_data["memory_id"],
                old_memory=f"Memory {i}",
                new_memory=f"Updated Memory {i}",
                event="ADD" if i == 0 else "UPDATE",
            )
        cursor = sqlite_manager.connection.cursor()
        cursor.execute("SELECT id FROM history WHERE memory_id = ?", (sample_data["memory_id"],))
        ids = [row[0] for row in cursor.fetchall()]
        assert len(ids) == 3
        # A set collapses duplicates, so equal lengths prove uniqueness.
        assert len(set(ids)) == 3

    # ========== Get History Tests ==========

    def test_get_history_empty(self, sqlite_manager):
        """Test get_history for non-existent memory_id."""
        result = sqlite_manager.get_history("non-existent-id")
        assert result == []

    def test_get_history_single_record(self, sqlite_manager, sample_data):
        """Test get_history for single record."""
        sqlite_manager.add_history(
            memory_id=sample_data["memory_id"],
            old_memory=sample_data["old_memory"],
            new_memory=sample_data["new_memory"],
            event=sample_data["event"],
            created_at=sample_data["created_at"],
            actor_id=sample_data["actor_id"],
            role=sample_data["role"],
        )
        result = sqlite_manager.get_history(sample_data["memory_id"])
        assert len(result) == 1
        record = result[0]
        assert record["memory_id"] == sample_data["memory_id"]
        assert record["old_memory"] == sample_data["old_memory"]
        assert record["new_memory"] == sample_data["new_memory"]
        assert record["event"] == sample_data["event"]
        assert record["created_at"] == sample_data["created_at"]
        assert record["actor_id"] == sample_data["actor_id"]
        assert record["role"] == sample_data["role"]
        # The integer flag is surfaced as a real bool in the dict API.
        assert record["is_deleted"] is False

    def test_get_history_chronological_ordering(self, sqlite_manager, sample_data):
        """Test get_history returns records in chronological order."""
        import time

        timestamps = []
        for i in range(3):
            ts = datetime.now().isoformat()
            timestamps.append(ts)
            sqlite_manager.add_history(
                memory_id=sample_data["memory_id"],
                old_memory=f"Memory {i}",
                new_memory=f"Memory {i+1}",
                event="ADD" if i == 0 else "UPDATE",
                created_at=ts,
                updated_at=ts if i > 0 else None,
            )
            # Small sleep guarantees strictly increasing ISO timestamps.
            time.sleep(0.01)
        result = sqlite_manager.get_history(sample_data["memory_id"])
        result_timestamps = [r["created_at"] for r in result]
        assert result_timestamps == sorted(timestamps)

    def test_migration_preserves_data(self, temp_db_path, sample_data):
        """Test that migration preserves existing data."""
        # First manager writes a record, then the DB is reopened so any
        # schema-migration path runs against the populated file.
        manager1 = SQLiteManager(temp_db_path)
        manager1.add_history(
            memory_id=sample_data["memory_id"],
            old_memory=sample_data["old_memory"],
            new_memory=sample_data["new_memory"],
            event=sample_data["event"],
            created_at=sample_data["created_at"],
        )
        original_data = manager1.get_history(sample_data["memory_id"])
        manager1.close()
        manager2 = SQLiteManager(temp_db_path)
        migrated_data = manager2.get_history(sample_data["memory_id"])
        manager2.close()
        assert len(migrated_data) == len(original_data)
        assert migrated_data[0]["memory_id"] == original_data[0]["memory_id"]
        assert migrated_data[0]["new_memory"] == original_data[0]["new_memory"]

    def test_large_batch_operations(self, sqlite_manager):
        """Test performance with large batch of operations."""
        batch_size = 1000
        memory_ids = [str(uuid.uuid4()) for _ in range(batch_size)]
        for i, memory_id in enumerate(memory_ids):
            sqlite_manager.add_history(
                memory_id=memory_id, old_memory=None, new_memory=f"Batch memory {i}", event="ADD"
            )
        cursor = sqlite_manager.connection.cursor()
        cursor.execute("SELECT COUNT(*) FROM history")
        count = cursor.fetchone()[0]
        assert count == batch_size
        # Spot-check retrieval on the first 10 ids rather than all 1000.
        for memory_id in memory_ids[:10]:
            result = sqlite_manager.get_history(memory_id)
            assert len(result) == 1

    # ========== Tests for Migration, Reset, and Close ==========

    def test_explicit_old_schema_migration(self, temp_db_path):
        """Test migration path from a legacy schema to new schema."""
        # Create a legacy 'history' table missing new columns
        legacy_conn = sqlite3.connect(temp_db_path)
        legacy_conn.execute("""
            CREATE TABLE history (
                id TEXT PRIMARY KEY,
                memory_id TEXT,
                old_memory TEXT,
                new_memory TEXT,
                event TEXT,
                created_at DATETIME
            )
        """)
        legacy_id = str(uuid.uuid4())
        legacy_conn.execute(
            "INSERT INTO history (id, memory_id, old_memory, new_memory, event, created_at) VALUES (?, ?, ?, ?, ?, ?)",
            (legacy_id, "m1", "o", "n", "ADD", datetime.now().isoformat()),
        )
        legacy_conn.commit()
        legacy_conn.close()
        # Trigger migration by opening the legacy DB with the manager.
        mgr = SQLiteManager(temp_db_path)
        history = mgr.get_history("m1")
        assert len(history) == 1
        assert history[0]["id"] == legacy_id
        # Columns added by the migration default to NULL / False.
        assert history[0]["actor_id"] is None
        assert history[0]["is_deleted"] is False
        mgr.close()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_vllm.py | tests/llms/test_vllm.py | from unittest.mock import MagicMock, Mock, patch
import pytest
from mem0 import AsyncMemory, Memory
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.vllm import VllmLLM
@pytest.fixture
def mock_vllm_client():
    """Patch the OpenAI-compatible client used by VllmLLM and yield the mock."""
    with patch("mem0.llms.vllm.OpenAI") as openai_cls:
        client = Mock()
        openai_cls.return_value = client
        yield client
def test_generate_response_without_tools(mock_vllm_client):
    """A plain chat completion returns the message content unchanged."""
    llm = VllmLLM(BaseLlmConfig(model="Qwen/Qwen2.5-32B-Instruct", temperature=0.7, max_tokens=100, top_p=1.0))
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    reply = Mock()
    reply.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    mock_vllm_client.chat.completions.create.return_value = reply

    answer = llm.generate_response(conversation)

    # The config values must be forwarded verbatim to the client.
    mock_vllm_client.chat.completions.create.assert_called_once_with(
        model="Qwen/Qwen2.5-32B-Instruct", messages=conversation, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_vllm_client):
    """Tool-enabled generation forwards tools/tool_choice and parses tool calls."""
    config = BaseLlmConfig(model="Qwen/Qwen2.5-32B-Instruct", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = VllmLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                    "required": ["data"],
                },
            },
        }
    ]
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "I've added the memory for you."
    mock_tool_call = Mock()
    # Set .name via attribute assignment -- Mock(name=...) would configure
    # the mock's own name instead of the attribute.
    mock_tool_call.function.name = "add_memory"
    mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'
    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_vllm_client.chat.completions.create.return_value = mock_response
    response = llm.generate_response(messages, tools=tools)
    mock_vllm_client.chat.completions.create.assert_called_once_with(
        model="Qwen/Qwen2.5-32B-Instruct",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        tools=tools,
        tool_choice="auto",
    )
    # The JSON arguments string must come back parsed into a dict.
    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
def create_mocked_memory():
    """Create a fully mocked Memory instance for testing.

    All four collaborators (LLM, embedder, vector store, history DB) are
    replaced by MagicMocks before Memory() is constructed, so the instance
    keeps the mocks after the patches are undone.
    """
    with patch('mem0.utils.factory.LlmFactory.create') as llm_create, \
            patch('mem0.utils.factory.EmbedderFactory.create') as embedder_create, \
            patch('mem0.utils.factory.VectorStoreFactory.create') as vector_create, \
            patch('mem0.memory.storage.SQLiteManager') as sqlite_manager_cls:
        fake_llm = MagicMock()
        llm_create.return_value = fake_llm

        fake_embedder = MagicMock()
        fake_embedder.embed.return_value = [0.1, 0.2, 0.3]
        embedder_create.return_value = fake_embedder

        fake_store = MagicMock()
        fake_store.search.return_value = []
        fake_store.add.return_value = None
        vector_create.return_value = fake_store

        sqlite_manager_cls.return_value = MagicMock()

        memory = Memory()
        memory.api_version = "v1.0"
        return memory, fake_llm, fake_store
def create_mocked_async_memory():
    """Create a fully mocked AsyncMemory instance for testing.

    Mirrors create_mocked_memory: all collaborators are MagicMocks wired
    in before AsyncMemory() is constructed.
    """
    with patch('mem0.utils.factory.LlmFactory.create') as llm_create, \
            patch('mem0.utils.factory.EmbedderFactory.create') as embedder_create, \
            patch('mem0.utils.factory.VectorStoreFactory.create') as vector_create, \
            patch('mem0.memory.storage.SQLiteManager') as sqlite_manager_cls:
        fake_llm = MagicMock()
        llm_create.return_value = fake_llm

        fake_embedder = MagicMock()
        fake_embedder.embed.return_value = [0.1, 0.2, 0.3]
        embedder_create.return_value = fake_embedder

        fake_store = MagicMock()
        fake_store.search.return_value = []
        fake_store.add.return_value = None
        vector_create.return_value = fake_store

        sqlite_manager_cls.return_value = MagicMock()

        memory = AsyncMemory()
        memory.api_version = "v1.0"
        return memory, fake_llm, fake_store
def test_thinking_tags_sync():
    """Test thinking tags handling in Memory._add_to_vector_store (sync)."""
    memory, mock_llm, mock_vector_store = create_mocked_memory()
    # Mock LLM config
    # Both LLM phases (fact extraction, then memory actions) return a
    # <think>...</think> prefix ahead of the JSON payload; the pipeline
    # must strip it before parsing.
    mock_llm.generate_response.side_effect = [
        ' <think>Sync fact extraction</think> \n{"facts": ["User loves sci-fi"]}',
        ' <think>Sync memory actions</think> \n{"memory": [{"text": "Loves sci-fi", "event": "ADD"}]}'
    ]
    mock_vector_store.search.return_value = []
    result = memory._add_to_vector_store(
        messages=[{"role": "user", "content": "I love sci-fi movies"}],
        metadata={},
        filters={},
        infer=True
    )
    assert len(result) == 1
    assert result[0]["memory"] == "Loves sci-fi"
    assert result[0]["event"] == "ADD"
@pytest.mark.asyncio
async def test_async_thinking_tags_async():
    """Test thinking tags handling in AsyncMemory._add_to_vector_store."""
    memory, mock_llm, mock_vector_store = create_mocked_async_memory()
    # Directly mock llm.generate_response instead of via asyncio.to_thread
    mock_llm.generate_response.side_effect = [
        ' <think>Async fact extraction</think> \n{"facts": ["User loves sci-fi"]}',
        ' <think>Async memory actions</think> \n{"memory": [{"text": "Loves sci-fi", "event": "ADD"}]}'
    ]
    # Mock asyncio.to_thread to call the function directly (bypass threading)
    async def mock_to_thread(func, *args, **kwargs):
        # Dispatch on the wrapped callable: the LLM mock runs for real,
        # embed/search get canned values, anything else passes through.
        if func == mock_llm.generate_response:
            return func(*args, **kwargs)
        elif hasattr(func, '__name__') and 'embed' in func.__name__:
            return [0.1, 0.2, 0.3]
        elif hasattr(func, '__name__') and 'search' in func.__name__:
            return []
        else:
            return func(*args, **kwargs)
    with patch('mem0.memory.main.asyncio.to_thread', side_effect=mock_to_thread):
        result = await memory._add_to_vector_store(
            messages=[{"role": "user", "content": "I love sci-fi movies"}],
            metadata={},
            effective_filters={},
            infer=True
        )
        assert len(result) == 1
        assert result[0]["memory"] == "Loves sci-fi"
        assert result[0]["event"] == "ADD"
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_azure_openai_structured.py | tests/llms/test_azure_openai_structured.py | from unittest import mock
from mem0.llms.azure_openai_structured import SCOPE, AzureOpenAIStructuredLLM
class DummyAzureKwargs:
    """Minimal stand-in for the Azure kwargs object consumed by the LLM wrapper.

    Simply records each constructor argument as an attribute of the same name.
    """

    def __init__(
        self,
        api_key=None,
        azure_deployment="test-deployment",
        azure_endpoint="https://test-endpoint.openai.azure.com",
        api_version="2024-06-01-preview",
        default_headers=None,
    ):
        settings = {
            "api_key": api_key,
            "azure_deployment": azure_deployment,
            "azure_endpoint": azure_endpoint,
            "api_version": api_version,
            "default_headers": default_headers,
        }
        for attr, value in settings.items():
            setattr(self, attr, value)
class DummyConfig:
    """Minimal LLM config stand-in.

    Records the generation settings and substitutes a default
    DummyAzureKwargs when no azure_kwargs object is supplied.
    """

    def __init__(
        self,
        model=None,
        azure_kwargs=None,
        temperature=0.7,
        max_tokens=256,
        top_p=1.0,
        http_client=None,
    ):
        self.model = model
        # A falsy azure_kwargs (e.g. None) falls back to the default dummy.
        self.azure_kwargs = DummyAzureKwargs() if not azure_kwargs else azure_kwargs
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.http_client = http_client
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
def test_init_with_api_key(mock_azure_openai):
    """A real API key is passed straight through; no AAD token provider is used."""
    llm = AzureOpenAIStructuredLLM(
        DummyConfig(model="test-model", azure_kwargs=DummyAzureKwargs(api_key="real-key"))
    )

    assert llm.config.model == "test-model"
    mock_azure_openai.assert_called_once()
    _, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] == "real-key"
    assert kwargs["azure_ad_token_provider"] is None
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
@mock.patch("mem0.llms.azure_openai_structured.get_bearer_token_provider")
@mock.patch("mem0.llms.azure_openai_structured.DefaultAzureCredential")
def test_init_with_default_credential(mock_credential, mock_token_provider, mock_azure_openai):
    """With no API key the LLM falls back to AAD DefaultAzureCredential auth."""
    # Decorators apply bottom-up: DefaultAzureCredential -> first argument,
    # get_bearer_token_provider -> second, AzureOpenAI -> third.
    config = DummyConfig(model=None, azure_kwargs=DummyAzureKwargs(api_key=None))
    mock_token_provider.return_value = "token-provider"
    llm = AzureOpenAIStructuredLLM(config)
    # Should set default model if not provided
    assert llm.config.model == "gpt-4.1-nano-2025-04-14"
    mock_credential.assert_called_once()
    mock_token_provider.assert_called_once_with(mock_credential.return_value, SCOPE)
    mock_azure_openai.assert_called_once()
    args, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] is None
    assert kwargs["azure_ad_token_provider"] == "token-provider"
def test_init_with_env_vars(monkeypatch, mocker):
    """Azure connection settings are picked up from LLM_AZURE_* env variables."""
    mock_azure_openai = mocker.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
    env = {
        "LLM_AZURE_DEPLOYMENT": "test-deployment",
        "LLM_AZURE_ENDPOINT": "https://test-endpoint.openai.azure.com",
        "LLM_AZURE_API_VERSION": "2024-06-01-preview",
    }
    for name, value in env.items():
        monkeypatch.setenv(name, value)

    AzureOpenAIStructuredLLM(DummyConfig(model="test-model", azure_kwargs=DummyAzureKwargs(api_key=None)))

    mock_azure_openai.assert_called_once()
    _, kwargs = mock_azure_openai.call_args
    assert kwargs["api_key"] is None
    assert kwargs["azure_deployment"] == "test-deployment"
    assert kwargs["azure_endpoint"] == "https://test-endpoint.openai.azure.com"
    assert kwargs["api_version"] == "2024-06-01-preview"
@mock.patch("mem0.llms.azure_openai_structured.AzureOpenAI")
def test_init_with_placeholder_api_key_uses_default_credential(
    mock_azure_openai,
):
    """The placeholder key "your-api-key" is treated as absent and triggers AAD auth."""
    with (
        mock.patch("mem0.llms.azure_openai_structured.DefaultAzureCredential") as mock_credential,
        mock.patch("mem0.llms.azure_openai_structured.get_bearer_token_provider") as mock_token_provider,
    ):
        config = DummyConfig(model=None, azure_kwargs=DummyAzureKwargs(api_key="your-api-key"))
        mock_token_provider.return_value = "token-provider"
        llm = AzureOpenAIStructuredLLM(config)

        # Default model is filled in when none is configured.
        assert llm.config.model == "gpt-4.1-nano-2025-04-14"
        mock_credential.assert_called_once()
        mock_token_provider.assert_called_once_with(mock_credential.return_value, SCOPE)
        mock_azure_openai.assert_called_once()
        args, kwargs = mock_azure_openai.call_args
        # The placeholder must NOT be forwarded as a real key.
        assert kwargs["api_key"] is None
        assert kwargs["azure_ad_token_provider"] == "token-provider"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_openai.py | tests/llms/test_openai.py | import os
from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.openai import OpenAIConfig
from mem0.llms.openai import OpenAILLM
@pytest.fixture
def mock_openai_client():
    """Patch the OpenAI client constructor and yield the mocked instance."""
    with patch("mem0.llms.openai.OpenAI") as openai_cls:
        client = Mock()
        openai_cls.return_value = client
        yield client
def test_openai_llm_base_url():
    """OpenAILLM resolves its base URL in increasing priority order:
    the OpenAI default, the OPENAI_BASE_URL environment variable, and the
    explicit ``openai_base_url`` config field.

    The original version mutated os.environ["OPENAI_BASE_URL"] and never
    restored it, leaking state into every test that ran afterwards; the
    variable is now snapshotted up front and restored in a finally block.
    """
    saved_base_url = os.environ.pop("OPENAI_BASE_URL", None)
    try:
        # case1: default config: with openai official base url
        config = OpenAIConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
        llm = OpenAILLM(config)
        # Note: openai client will parse the raw base_url into a URL object, which will have a trailing slash
        assert str(llm.client.base_url) == "https://api.openai.com/v1/"

        # case2: with env variable OPENAI_BASE_URL
        provider_base_url = "https://api.provider.com/v1"
        os.environ["OPENAI_BASE_URL"] = provider_base_url
        config = OpenAIConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
        llm = OpenAILLM(config)
        assert str(llm.client.base_url) == provider_base_url + "/"

        # case3: config.openai_base_url takes precedence over the env variable
        config_base_url = "https://api.config.com/v1"
        config = OpenAIConfig(
            model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key", openai_base_url=config_base_url
        )
        llm = OpenAILLM(config)
        assert str(llm.client.base_url) == config_base_url + "/"
    finally:
        # Restore the environment so this test cannot leak state into others.
        os.environ.pop("OPENAI_BASE_URL", None)
        if saved_base_url is not None:
            os.environ["OPENAI_BASE_URL"] = saved_base_url
def test_generate_response_without_tools(mock_openai_client):
    """A plain chat completion returns the message content unchanged."""
    llm = OpenAILLM(OpenAIConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1.0))
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    reply = Mock()
    reply.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    mock_openai_client.chat.completions.create.return_value = reply

    answer = llm.generate_response(conversation)

    # Config values are forwarded verbatim, with store=False added.
    mock_openai_client.chat.completions.create.assert_called_once_with(
        model="gpt-4.1-nano-2025-04-14", messages=conversation, temperature=0.7, max_tokens=100, top_p=1.0, store=False
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_openai_client):
    """Tool-enabled generation forwards tools/tool_choice and parses tool calls."""
    config = OpenAIConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = OpenAILLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                    "required": ["data"],
                },
            },
        }
    ]
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "I've added the memory for you."
    mock_tool_call = Mock()
    # Set .name via attribute assignment -- Mock(name=...) would configure
    # the mock's own name instead of the attribute.
    mock_tool_call.function.name = "add_memory"
    mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'
    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_openai_client.chat.completions.create.return_value = mock_response
    response = llm.generate_response(messages, tools=tools)
    mock_openai_client.chat.completions.create.assert_called_once_with(
        model="gpt-4.1-nano-2025-04-14", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0, tools=tools, tool_choice="auto", store=False
    )
    # The JSON arguments string must come back parsed into a dict.
    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
def test_response_callback_invocation(mock_openai_client):
    """The configured response_callback receives (llm, raw_response, params)."""
    callback = Mock()
    llm = OpenAILLM(OpenAIConfig(model="gpt-4.1-nano-2025-04-14", response_callback=callback))

    raw = Mock()
    raw.choices = [Mock(message=Mock(content="Response"))]
    mock_openai_client.chat.completions.create.return_value = raw

    llm.generate_response([{"role": "user", "content": "Test callback"}])

    callback.assert_called_once()
    positional = callback.call_args[0]
    assert positional[0] is llm          # llm_instance
    assert positional[1] == raw          # raw_response
    assert "messages" in positional[2]   # params
def test_no_response_callback(mock_openai_client):
    """Without a callback configured, generation works and no callback exists."""
    llm = OpenAILLM(OpenAIConfig(model="gpt-4.1-nano-2025-04-14"))

    raw = Mock()
    raw.choices = [Mock(message=Mock(content="Response"))]
    mock_openai_client.chat.completions.create.return_value = raw

    # Should complete without invoking any callback.
    assert llm.generate_response([{"role": "user", "content": "Test no callback"}]) == "Response"
    # No callback is configured on the instance.
    assert llm.config.response_callback is None
def test_callback_exception_handling(mock_openai_client):
    """A raising response_callback must not break generate_response."""
    # Callback that raises exception
    def faulty_callback(*args):
        raise ValueError("Callback error")
    config = OpenAIConfig(model="gpt-4.1-nano-2025-04-14", response_callback=faulty_callback)
    llm = OpenAILLM(config)
    messages = [{"role": "user", "content": "Test exception"}]
    # Mock response
    mock_response = Mock()
    mock_response.choices = [Mock(message=Mock(content="Expected response"))]
    mock_openai_client.chat.completions.create.return_value = mock_response
    # Should complete without raising
    # NOTE(review): this relies on OpenAILLM suppressing callback exceptions
    # internally -- confirm against the implementation.
    response = llm.generate_response(messages)
    assert response == "Expected response"
    # Verify callback was called (even though it raised an exception)
    assert llm.config.response_callback is faulty_callback
def test_callback_with_tools(mock_openai_client):
    """The callback also fires for tool-enabled generations and sees tool_calls."""
    mock_callback = Mock()
    config = OpenAIConfig(model="gpt-4.1-nano-2025-04-14", response_callback=mock_callback)
    llm = OpenAILLM(config)
    messages = [{"role": "user", "content": "Test tools"}]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test tool",
                "parameters": {
                    "type": "object",
                    "properties": {"param1": {"type": "string"}},
                    "required": ["param1"],
                },
            }
        }
    ]
    # Mock tool response
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "Tool response"
    mock_tool_call = Mock()
    # Set .name via attribute assignment -- Mock(name=...) would configure
    # the mock's own name instead of the attribute.
    mock_tool_call.function.name = "test_tool"
    mock_tool_call.function.arguments = '{"param1": "value1"}'
    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_openai_client.chat.completions.create.return_value = mock_response
    llm.generate_response(messages, tools=tools)
    # Verify callback called with tool response
    mock_callback.assert_called_once()
    # Check that tool_calls exists in the message passed to the callback
    assert hasattr(mock_callback.call_args[0][1].choices[0].message, 'tool_calls')
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_groq.py | tests/llms/test_groq.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.groq import GroqLLM
@pytest.fixture
def mock_groq_client():
    """Patch the Groq client constructor and yield the mocked instance."""
    with patch("mem0.llms.groq.Groq") as groq_cls:
        client = Mock()
        groq_cls.return_value = client
        yield client
def test_generate_response_without_tools(mock_groq_client):
    """A plain chat completion returns the message content unchanged."""
    llm = GroqLLM(BaseLlmConfig(model="llama3-70b-8192", temperature=0.7, max_tokens=100, top_p=1.0))
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    reply = Mock()
    reply.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    mock_groq_client.chat.completions.create.return_value = reply

    answer = llm.generate_response(conversation)

    # The config values must be forwarded verbatim to the client.
    mock_groq_client.chat.completions.create.assert_called_once_with(
        model="llama3-70b-8192", messages=conversation, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_groq_client):
    """Tool-enabled generation forwards tools/tool_choice and parses tool calls."""
    config = BaseLlmConfig(model="llama3-70b-8192", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = GroqLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                    "required": ["data"],
                },
            },
        }
    ]
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "I've added the memory for you."
    mock_tool_call = Mock()
    # Set .name via attribute assignment -- Mock(name=...) would configure
    # the mock's own name instead of the attribute.
    mock_tool_call.function.name = "add_memory"
    mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'
    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_groq_client.chat.completions.create.return_value = mock_response
    response = llm.generate_response(messages, tools=tools)
    mock_groq_client.chat.completions.create.assert_called_once_with(
        model="llama3-70b-8192",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        tools=tools,
        tool_choice="auto",
    )
    # The JSON arguments string must come back parsed into a dict.
    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_together.py | tests/llms/test_together.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.together import TogetherLLM
@pytest.fixture
def mock_together_client():
    """Patch the Together SDK class used by TogetherLLM and yield the mocked instance."""
    with patch("mem0.llms.together.Together") as together_cls:
        instance = Mock()
        together_cls.return_value = instance
        yield instance
def test_generate_response_without_tools(mock_together_client):
    """Plain chat completion: config params forwarded verbatim, text content returned."""
    llm = TogetherLLM(
        BaseLlmConfig(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.7, max_tokens=100, top_p=1.0)
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    mock_together_client.chat.completions.create.return_value = Mock(
        choices=[Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    )

    answer = llm.generate_response(messages)

    mock_together_client.chat.completions.create.assert_called_once_with(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_together_client):
    """Tool-enabled call: tools/tool_choice forwarded; tool calls parsed from the reply."""
    llm = TogetherLLM(
        BaseLlmConfig(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.7, max_tokens=100, top_p=1.0)
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Mock() treats the `name` kwarg specially, so assign after construction.
    call_stub = Mock()
    call_stub.function.name = "add_memory"
    call_stub.function.arguments = '{"data": "Today is a sunny day."}'
    reply = Mock()
    reply.content = "I've added the memory for you."
    reply.tool_calls = [call_stub]
    mock_together_client.chat.completions.create.return_value = Mock(choices=[Mock(message=reply)])

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_together_client.chat.completions.create.assert_called_once_with(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        tools=[memory_tool],
        tool_choice="auto",
    )
    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_deepseek.py | tests/llms/test_deepseek.py | import os
from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.deepseek import DeepSeekConfig
from mem0.llms.deepseek import DeepSeekLLM
@pytest.fixture
def mock_deepseek_client():
    """Patch the OpenAI client class that DeepSeekLLM instantiates."""
    with patch("mem0.llms.deepseek.OpenAI") as openai_cls:
        instance = Mock()
        openai_cls.return_value = instance
        yield instance
def test_deepseek_llm_base_url():
    """Verify base-URL precedence: explicit config value > DEEPSEEK_API_BASE env > default.

    Bug fix: the original version set DEEPSEEK_API_BASE process-wide and never
    removed it, leaking the override into every test that runs afterwards.
    The env var is now saved and restored in a try/finally.
    """
    # case1: default config resolves to the official DeepSeek endpoint
    config = BaseLlmConfig(model="deepseek-chat", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
    llm = DeepSeekLLM(config)
    assert str(llm.client.base_url) == "https://api.deepseek.com"

    provider_base_url = "https://api.provider.com/v1/"
    saved_base = os.environ.get("DEEPSEEK_API_BASE")
    os.environ["DEEPSEEK_API_BASE"] = provider_base_url
    try:
        # case2: the DEEPSEEK_API_BASE environment variable overrides the default
        config = DeepSeekConfig(model="deepseek-chat", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
        llm = DeepSeekLLM(config)
        assert str(llm.client.base_url) == provider_base_url

        # case3: an explicit config.deepseek_base_url wins over the env variable
        config_base_url = "https://api.config.com/v1/"
        config = DeepSeekConfig(
            model="deepseek-chat",
            temperature=0.7,
            max_tokens=100,
            top_p=1.0,
            api_key="api_key",
            deepseek_base_url=config_base_url,
        )
        llm = DeepSeekLLM(config)
        assert str(llm.client.base_url) == config_base_url
    finally:
        # Restore the pre-test environment so the mutation cannot leak.
        if saved_base is None:
            os.environ.pop("DEEPSEEK_API_BASE", None)
        else:
            os.environ["DEEPSEEK_API_BASE"] = saved_base
def test_generate_response_without_tools(mock_deepseek_client):
    """Plain chat completion: params forwarded verbatim, text content returned."""
    llm = DeepSeekLLM(BaseLlmConfig(model="deepseek-chat", temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    mock_deepseek_client.chat.completions.create.return_value = Mock(
        choices=[Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    )

    answer = llm.generate_response(messages)

    mock_deepseek_client.chat.completions.create.assert_called_once_with(
        model="deepseek-chat", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_deepseek_client):
    """Tool-enabled call: tools/tool_choice forwarded; tool calls parsed from the reply."""
    llm = DeepSeekLLM(BaseLlmConfig(model="deepseek-chat", temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Mock() treats the `name` kwarg specially, so assign after construction.
    call_stub = Mock()
    call_stub.function.name = "add_memory"
    call_stub.function.arguments = '{"data": "Today is a sunny day."}'
    reply = Mock()
    reply.content = "I've added the memory for you."
    reply.tool_calls = [call_stub]
    mock_deepseek_client.chat.completions.create.return_value = Mock(choices=[Mock(message=reply)])

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_deepseek_client.chat.completions.create.assert_called_once_with(
        model="deepseek-chat",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        tools=[memory_tool],
        tool_choice="auto",
    )
    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_azure_openai.py | tests/llms/test_azure_openai.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.azure import AzureOpenAIConfig
from mem0.llms.azure_openai import AzureOpenAILLM
# Shared request parameters asserted by every test in this module.
MODEL = "gpt-4.1-nano-2025-04-14"  # or your custom deployment name
TEMPERATURE = 0.7
MAX_TOKENS = 100
TOP_P = 1.0
@pytest.fixture
def mock_openai_client():
    """Patch the AzureOpenAI client class and yield the mocked instance."""
    with patch("mem0.llms.azure_openai.AzureOpenAI") as azure_cls:
        instance = Mock()
        azure_cls.return_value = instance
        yield instance
def test_generate_response_without_tools(mock_openai_client):
    """Plain chat completion: module-level params forwarded, content returned."""
    llm = AzureOpenAILLM(AzureOpenAIConfig(model=MODEL, temperature=TEMPERATURE, max_tokens=MAX_TOKENS, top_p=TOP_P))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    mock_openai_client.chat.completions.create.return_value = Mock(
        choices=[Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    )

    answer = llm.generate_response(messages)

    mock_openai_client.chat.completions.create.assert_called_once_with(
        model=MODEL, messages=messages, temperature=TEMPERATURE, max_tokens=MAX_TOKENS, top_p=TOP_P
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_openai_client):
    """Tool-enabled call: tools/tool_choice forwarded; tool calls parsed from the reply."""
    llm = AzureOpenAILLM(AzureOpenAIConfig(model=MODEL, temperature=TEMPERATURE, max_tokens=MAX_TOKENS, top_p=TOP_P))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Mock() treats the `name` kwarg specially, so assign after construction.
    call_stub = Mock()
    call_stub.function.name = "add_memory"
    call_stub.function.arguments = '{"data": "Today is a sunny day."}'
    reply = Mock()
    reply.content = "I've added the memory for you."
    reply.tool_calls = [call_stub]
    mock_openai_client.chat.completions.create.return_value = Mock(choices=[Mock(message=reply)])

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_openai_client.chat.completions.create.assert_called_once_with(
        model=MODEL,
        messages=messages,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=TOP_P,
        tools=[memory_tool],
        tool_choice="auto",
    )
    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
@pytest.mark.parametrize(
    "default_headers",
    [None, {"Firstkey": "FirstVal", "SecondKey": "SecondVal"}],
)
def test_generate_with_http_proxies(default_headers):
    """http_client_proxies must be turned into an httpx.Client handed to AzureOpenAI."""
    http_client_cls = Mock()
    http_client_instance = Mock()
    http_client_cls.return_value = http_client_instance

    azure_kwargs = {"api_key": "test"}
    if default_headers:
        azure_kwargs["default_headers"] = default_headers

    with (
        patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai,
        patch("httpx.Client", new=http_client_cls),
    ):
        _ = AzureOpenAILLM(
            AzureOpenAIConfig(
                model=MODEL,
                temperature=TEMPERATURE,
                max_tokens=MAX_TOKENS,
                top_p=TOP_P,
                api_key="test",
                http_client_proxies="http://testproxy.mem0.net:8000",
                azure_kwargs=azure_kwargs,
            )
        )

        mock_azure_openai.assert_called_once_with(
            api_key="test",
            http_client=http_client_instance,
            azure_deployment=None,
            azure_endpoint=None,
            azure_ad_token_provider=None,
            api_version=None,
            default_headers=default_headers,
        )
        http_client_cls.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
def test_init_with_api_key(monkeypatch):
    """Config-supplied Azure settings are forwarded verbatim when no env vars exist."""
    # Drop env overrides so the config values are the only source of truth.
    for var in ("LLM_AZURE_OPENAI_API_KEY", "LLM_AZURE_DEPLOYMENT", "LLM_AZURE_ENDPOINT", "LLM_AZURE_API_VERSION"):
        monkeypatch.delenv(var, raising=False)

    config = AzureOpenAIConfig(
        model=MODEL,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=TOP_P,
    )
    # Populate the Azure-specific settings directly on the config object.
    for attr, value in {
        "api_key": "test-key",
        "azure_deployment": "test-deployment",
        "azure_endpoint": "https://test-endpoint",
        "api_version": "2024-01-01",
        "default_headers": {"x-test": "header"},
    }.items():
        setattr(config.azure_kwargs, attr, value)
    config.http_client = None

    with patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai:
        llm = AzureOpenAILLM(config)
        mock_azure_openai.assert_called_once_with(
            azure_deployment="test-deployment",
            azure_endpoint="https://test-endpoint",
            azure_ad_token_provider=None,
            api_version="2024-01-01",
            api_key="test-key",
            http_client=None,
            default_headers={"x-test": "header"},
        )
        assert llm.config.model == MODEL
def test_init_with_env_vars(monkeypatch):
    """Env vars fill in the Azure settings; a missing model falls back to the default."""
    for var, value in {
        "LLM_AZURE_OPENAI_API_KEY": "env-key",
        "LLM_AZURE_DEPLOYMENT": "env-deployment",
        "LLM_AZURE_ENDPOINT": "https://env-endpoint",
        "LLM_AZURE_API_VERSION": "2024-02-02",
    }.items():
        monkeypatch.setenv(var, value)

    config = AzureOpenAIConfig(model=None)
    # Blank out config-level Azure settings so the env values must be used.
    for attr in ("api_key", "azure_deployment", "azure_endpoint", "api_version", "default_headers"):
        setattr(config.azure_kwargs, attr, None)
    config.http_client = None

    with patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai:
        llm = AzureOpenAILLM(config)
        mock_azure_openai.assert_called_once_with(
            azure_deployment="env-deployment",
            azure_endpoint="https://env-endpoint",
            azure_ad_token_provider=None,
            api_version="2024-02-02",
            api_key="env-key",
            http_client=None,
            default_headers=None,
        )
        # Model should default to "gpt-4.1-nano-2025-04-14" when None was supplied.
        assert llm.config.model == "gpt-4.1-nano-2025-04-14"
def test_init_with_default_azure_credential(monkeypatch):
    """Without an API key, a bearer-token provider from DefaultAzureCredential is wired in."""
    monkeypatch.delenv("LLM_AZURE_OPENAI_API_KEY", raising=False)

    config = AzureOpenAIConfig(model=MODEL)
    for attr, value in {
        "api_key": None,
        "azure_deployment": "dep",
        "azure_endpoint": "https://endpoint",
        "api_version": "2024-03-03",
        "default_headers": None,
    }.items():
        setattr(config.azure_kwargs, attr, value)
    config.http_client = None

    with (
        patch("mem0.llms.azure_openai.DefaultAzureCredential") as mock_cred,
        patch("mem0.llms.azure_openai.get_bearer_token_provider") as mock_token_provider,
        patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai,
    ):
        mock_token_provider.return_value = "token-provider"

        AzureOpenAILLM(config)

        mock_cred.assert_called_once()
        mock_token_provider.assert_called_once_with(
            mock_cred.return_value, "https://cognitiveservices.azure.com/.default"
        )
        mock_azure_openai.assert_called_once_with(
            azure_deployment="dep",
            azure_endpoint="https://endpoint",
            azure_ad_token_provider="token-provider",
            api_version="2024-03-03",
            api_key=None,
            http_client=None,
            default_headers=None,
        )
def test_init_with_placeholder_api_key(monkeypatch):
    """The literal placeholder key must also fall back to DefaultAzureCredential."""
    config = AzureOpenAIConfig(model=MODEL)
    for attr, value in {
        "api_key": "your-api-key",
        "azure_deployment": "dep",
        "azure_endpoint": "https://endpoint",
        "api_version": "2024-04-04",
        "default_headers": None,
    }.items():
        setattr(config.azure_kwargs, attr, value)
    config.http_client = None

    with (
        patch("mem0.llms.azure_openai.DefaultAzureCredential") as mock_cred,
        patch("mem0.llms.azure_openai.get_bearer_token_provider") as mock_token_provider,
        patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai,
    ):
        mock_token_provider.return_value = "token-provider"

        AzureOpenAILLM(config)

        mock_cred.assert_called_once()
        mock_token_provider.assert_called_once_with(
            mock_cred.return_value, "https://cognitiveservices.azure.com/.default"
        )
        mock_azure_openai.assert_called_once_with(
            azure_deployment="dep",
            azure_endpoint="https://endpoint",
            azure_ad_token_provider="token-provider",
            api_version="2024-04-04",
            api_key=None,
            http_client=None,
            default_headers=None,
        )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_gemini.py | tests/llms/test_gemini.py | from unittest.mock import Mock, patch
import pytest
from google.genai import types
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.gemini import GeminiLLM
@pytest.fixture
def mock_gemini_client():
    """Patch genai.Client and yield the mocked instance GeminiLLM will use."""
    with patch("mem0.llms.gemini.genai.Client") as client_cls:
        instance = Mock()
        client_cls.return_value = instance
        yield instance
def test_generate_response_without_tools(mock_gemini_client: Mock):
    """The system message is lifted into config.system_instruction; text is returned."""
    llm = GeminiLLM(BaseLlmConfig(model="gemini-2.0-flash-latest", temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    candidate = Mock(content=Mock(parts=[Mock(text="I'm doing well, thank you for asking!")]))
    mock_gemini_client.models.generate_content.return_value = Mock(candidates=[candidate])

    answer = llm.generate_response(messages)

    mock_gemini_client.models.generate_content.assert_called_once()
    kwargs = mock_gemini_client.models.generate_content.call_args.kwargs

    assert kwargs["model"] == "gemini-2.0-flash-latest"
    assert len(kwargs["contents"]) == 1  # system prompt stripped from contents
    gen_config = kwargs["config"]
    assert gen_config.system_instruction == "You are a helpful assistant."
    assert gen_config.temperature == 0.7
    assert gen_config.max_output_tokens == 100
    assert gen_config.top_p == 1.0
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_gemini_client: Mock):
    """Tools are converted into genai config; text and function_call parts are both parsed."""
    llm = GeminiLLM(BaseLlmConfig(model="gemini-1.5-flash-latest", temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Mock() treats the `name` kwarg specially, so assign after construction.
    fn_call = Mock()
    fn_call.name = "add_memory"
    fn_call.args = {"data": "Today is a sunny day."}
    text_part = Mock(text="I've added the memory for you.", function_call=None)
    fn_part = Mock(text=None, function_call=fn_call)
    candidate = Mock(content=Mock(parts=[text_part, fn_part]))
    mock_gemini_client.models.generate_content.return_value = Mock(candidates=[candidate])

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_gemini_client.models.generate_content.assert_called_once()
    kwargs = mock_gemini_client.models.generate_content.call_args.kwargs

    assert kwargs["model"] == "gemini-1.5-flash-latest"
    assert len(kwargs["contents"]) == 1  # only the user turn remains in contents
    gen_config = kwargs["config"]
    assert gen_config.system_instruction == "You are a helpful assistant."
    assert gen_config.temperature == 0.7
    assert gen_config.max_output_tokens == 100
    assert gen_config.top_p == 1.0
    assert len(gen_config.tools) == 1
    assert gen_config.tool_config.function_calling_config.mode == types.FunctionCallingConfigMode.AUTO

    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_lm_studio.py | tests/llms/test_lm_studio.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.lmstudio import LMStudioConfig
from mem0.llms.lmstudio import LMStudioLLM
@pytest.fixture
def mock_lm_studio_client():
    """Patch the OpenAI-compatible client inside mem0.llms.lmstudio with a canned reply."""
    with patch("mem0.llms.lmstudio.OpenAI") as openai_cls:
        instance = Mock()
        instance.chat.completions.create.return_value = Mock(
            choices=[Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
        )
        openai_cls.return_value = instance
        yield instance
def test_generate_response_without_tools(mock_lm_studio_client):
    """By default LM Studio requests a JSON-object response format."""
    model_path = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"
    llm = LMStudioLLM(LMStudioConfig(model=model_path, temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]

    answer = llm.generate_response(messages)

    mock_lm_studio_client.chat.completions.create.assert_called_once_with(
        model=model_path,
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        response_format={"type": "json_object"},
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_specifying_response_format(mock_lm_studio_client):
    """A response format set in the config must override the json_object default."""
    model_path = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"
    llm = LMStudioLLM(
        LMStudioConfig(
            model=model_path,
            temperature=0.7,
            max_tokens=100,
            top_p=1.0,
            lmstudio_response_format={"type": "json_schema"},  # explicit override
        )
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]

    answer = llm.generate_response(messages)

    mock_lm_studio_client.chat.completions.create.assert_called_once_with(
        model=model_path,
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        response_format={"type": "json_schema"},
    )
    assert answer == "I'm doing well, thank you for asking!"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_langchain.py | tests/llms/test_langchain.py | from unittest.mock import Mock
import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.langchain import LangchainLLM
# Add the import for BaseChatModel
try:
from langchain.chat_models.base import BaseChatModel
except ImportError:
from unittest.mock import MagicMock
BaseChatModel = MagicMock
@pytest.fixture
def mock_langchain_model():
    """A BaseChatModel-spec'd mock whose invoke() returns a canned response."""
    model = Mock(spec=BaseChatModel)
    model.invoke.return_value = Mock(content="This is a test response")
    return model
def test_langchain_initialization(mock_langchain_model):
    """The model instance passed in the config is stored on the wrapper."""
    llm = LangchainLLM(
        BaseLlmConfig(model=mock_langchain_model, temperature=0.7, max_tokens=100, api_key="test-api-key")
    )
    assert llm.langchain_model is mock_langchain_model
def test_generate_response(mock_langchain_model):
    """Messages are translated to (role, content) tuples before invoking the model."""
    llm = LangchainLLM(
        BaseLlmConfig(model=mock_langchain_model, temperature=0.7, max_tokens=100, api_key="test-api-key")
    )
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing well! How can I help you?"},
        {"role": "user", "content": "Tell me a joke."},
    ]

    answer = llm.generate_response(conversation)

    mock_langchain_model.invoke.assert_called_once()
    # The first positional argument is the translated message list.
    sent = mock_langchain_model.invoke.call_args[0][0]
    assert sent == [
        ("system", "You are a helpful assistant."),
        ("human", "Hello, how are you?"),
        ("ai", "I'm doing well! How can I help you?"),
        ("human", "Tell me a joke."),
    ]
    assert answer == "This is a test response"
def test_generate_response_with_tools(mock_langchain_model):
    """Tools are bound via bind_tools and tool calls are surfaced in the result dict."""
    llm = LangchainLLM(
        BaseLlmConfig(model=mock_langchain_model, temperature=0.7, max_tokens=100, api_key="test-api-key")
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Tool calls are read by subscripting, so emulate a mapping-like object.
    call_payload = {"name": "add_memory", "args": {"data": "Today is a sunny day."}}
    call_stub = Mock()
    call_stub.__getitem__ = Mock(side_effect=call_payload.__getitem__)
    reply = Mock()
    reply.content = "I've added the memory for you."
    reply.tool_calls = [call_stub]
    mock_langchain_model.invoke.return_value = reply
    mock_langchain_model.bind_tools.return_value = mock_langchain_model

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_langchain_model.invoke.assert_called_once()
    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
def test_invalid_model():
    """A non-BaseChatModel value for `model` must be rejected with a clear error."""
    bad_config = BaseLlmConfig(
        model="not-a-valid-model-instance", temperature=0.7, max_tokens=100, api_key="test-api-key"
    )
    with pytest.raises(ValueError, match="`model` must be an instance of BaseChatModel"):
        LangchainLLM(bad_config)
def test_missing_model():
    """Omitting the model entirely must raise a clear error."""
    empty_config = BaseLlmConfig(model=None, temperature=0.7, max_tokens=100, api_key="test-api-key")
    with pytest.raises(ValueError, match="`model` parameter is required"):
        LangchainLLM(empty_config)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_litellm.py | tests/llms/test_litellm.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms import litellm
@pytest.fixture
def mock_litellm():
    """Patch the litellm module referenced inside mem0.llms.litellm."""
    with patch("mem0.llms.litellm.litellm") as patched:
        yield patched
def test_generate_response_with_unsupported_model(mock_litellm):
    """A model without function-calling support is rejected up front."""
    llm = litellm.LiteLLM(BaseLlmConfig(model="unsupported-model", temperature=0.7, max_tokens=100, top_p=1))
    mock_litellm.supports_function_calling.return_value = False

    with pytest.raises(ValueError, match="Model 'unsupported-model' in litellm does not support function calling."):
        llm.generate_response([{"role": "user", "content": "Hello"}])
def test_generate_response_without_tools(mock_litellm):
    """Plain completion path: params forwarded, content string returned."""
    llm = litellm.LiteLLM(BaseLlmConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    mock_litellm.completion.return_value = Mock(
        choices=[Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
    )
    mock_litellm.supports_function_calling.return_value = True

    answer = llm.generate_response(messages)

    mock_litellm.completion.assert_called_once_with(
        model="gpt-4.1-nano-2025-04-14", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0
    )
    assert answer == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_litellm):
    """Function-calling path: tools forwarded with tool_choice='auto', tool calls parsed."""
    llm = litellm.LiteLLM(BaseLlmConfig(model="gpt-4.1-nano-2025-04-14", temperature=0.7, max_tokens=100, top_p=1))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    memory_tool = {
        "type": "function",
        "function": {
            "name": "add_memory",
            "description": "Add a memory",
            "parameters": {
                "type": "object",
                "properties": {"data": {"type": "string", "description": "Data to add to memory"}},
                "required": ["data"],
            },
        },
    }

    # Mock() treats the `name` kwarg specially, so assign after construction.
    call_stub = Mock()
    call_stub.function.name = "add_memory"
    call_stub.function.arguments = '{"data": "Today is a sunny day."}'
    reply = Mock()
    reply.content = "I've added the memory for you."
    reply.tool_calls = [call_stub]
    mock_litellm.completion.return_value = Mock(choices=[Mock(message=reply)])
    mock_litellm.supports_function_calling.return_value = True

    result = llm.generate_response(messages, tools=[memory_tool])

    mock_litellm.completion.assert_called_once_with(
        model="gpt-4.1-nano-2025-04-14",
        messages=messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1,
        tools=[memory_tool],
        tool_choice="auto",
    )
    assert result["content"] == "I've added the memory for you."
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "add_memory"
    assert result["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/llms/test_ollama.py | tests/llms/test_ollama.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.llms.ollama import OllamaConfig
from mem0.llms.ollama import OllamaLLM
@pytest.fixture
def mock_ollama_client():
    """Patch the Ollama Client; report llama3.1:70b as an already-pulled model."""
    with patch("mem0.llms.ollama.Client") as client_cls:
        instance = Mock()
        instance.list.return_value = {"models": [{"name": "llama3.1:70b"}]}
        client_cls.return_value = instance
        yield instance
def test_generate_response_without_tools(mock_ollama_client):
    """The options dict must carry temperature/num_predict/top_p; content is returned."""
    llm = OllamaLLM(OllamaConfig(model="llama3.1:70b", temperature=0.7, max_tokens=100, top_p=1.0))
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    mock_ollama_client.chat.return_value = {"message": {"content": "I'm doing well, thank you for asking!"}}

    answer = llm.generate_response(messages)

    mock_ollama_client.chat.assert_called_once_with(
        model="llama3.1:70b", messages=messages, options={"temperature": 0.7, "num_predict": 100, "top_p": 1.0}
    )
    assert answer == "I'm doing well, thank you for asking!"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_pinecone.py | tests/vector_stores/test_pinecone.py | from unittest.mock import MagicMock
import pytest
from mem0.vector_stores.pinecone import PineconeDB
@pytest.fixture
def mock_pinecone_client():
    """A Pinecone client mock that initially reports no existing indexes."""
    client = MagicMock()
    client.list_indexes.return_value.names.return_value = []
    client.Index.return_value = MagicMock()
    return client
@pytest.fixture
def pinecone_db(mock_pinecone_client):
    """A PineconeDB wired to the mocked client (no real Pinecone traffic)."""
    settings = dict(
        collection_name="test_index",
        embedding_model_dims=128,
        client=mock_pinecone_client,
        api_key="fake_api_key",
        environment="us-west1-gcp",
        serverless_config=None,
        pod_config=None,
        hybrid_search=False,
        metric="cosine",
        batch_size=100,
        extra_params=None,
        namespace="test_namespace",
    )
    return PineconeDB(**settings)
def test_create_col_existing_index(mock_pinecone_client):
    """create_col() must not re-create an index the client already reports."""
    # The index has to be listed *before* PineconeDB is constructed.
    mock_pinecone_client.list_indexes.return_value.names.return_value = ["test_index"]
    db = PineconeDB(
        collection_name="test_index",
        embedding_model_dims=128,
        client=mock_pinecone_client,
        api_key="fake_api_key",
        environment="us-west1-gcp",
        serverless_config=None,
        pod_config=None,
        hybrid_search=False,
        metric="cosine",
        batch_size=100,
        extra_params=None,
        namespace="test_namespace",
    )
    # Ignore any create_index calls that happened during construction.
    mock_pinecone_client.create_index.reset_mock()

    db.create_col(128, "cosine")

    mock_pinecone_client.create_index.assert_not_called()
def test_create_col_new_index(pinecone_db, mock_pinecone_client):
    """create_col() creates the index when the name is not present yet."""
    names_stub = mock_pinecone_client.list_indexes.return_value.names
    names_stub.return_value = []
    pinecone_db.create_col(128, "cosine")
    assert mock_pinecone_client.create_index.called
def test_insert_vectors(pinecone_db):
    """insert() should upsert id/values/metadata triples into the namespace."""
    ids = ["id1", "id2"]
    vectors = [[0.1] * 128, [0.2] * 128]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]
    pinecone_db.insert(vectors, payloads, ids)
    expected_docs = [
        {"id": doc_id, "values": values, "metadata": meta}
        for doc_id, values, meta in zip(ids, vectors, payloads)
    ]
    pinecone_db.index.upsert.assert_called_with(vectors=expected_docs, namespace="test_namespace")
def test_search_vectors(pinecone_db):
    """search() forwards the query vector to index.query and parses matches."""
    pinecone_db.index.query.return_value.matches = [
        {"id": "id1", "score": 0.9, "metadata": {"name": "vector1"}}
    ]
    hits = pinecone_db.search("test query", [0.1] * 128, limit=1)
    expected_call = dict(
        vector=[0.1] * 128,
        top_k=1,
        include_metadata=True,
        include_values=False,
        namespace="test_namespace",
    )
    pinecone_db.index.query.assert_called_with(**expected_call)
    assert len(hits) == 1
    assert hits[0].id == "id1"
    assert hits[0].score == 0.9
def test_update_vector(pinecone_db):
    """update() should upsert the replacement vector and metadata for the id."""
    new_values = [0.5] * 128
    pinecone_db.update("id1", vector=new_values, payload={"name": "updated"})
    pinecone_db.index.upsert.assert_called_with(
        vectors=[{"id": "id1", "values": new_values, "metadata": {"name": "updated"}}],
        namespace="test_namespace",
    )
def test_get_vector_found(pinecone_db):
    """get() parses a fetched pinecone.Vector into an output record."""
    # Looking at the _parse_output method, it expects a Vector object
    # or a list of dictionaries, not a dictionary with an 'id' field
    # Create a mock Vector object
    from pinecone import Vector
    mock_vector = Vector(id="id1", values=[0.1] * 128, metadata={"name": "vector1"})
    # Mock the fetch method to return the mock response object
    mock_response = MagicMock()
    mock_response.vectors = {"id1": mock_vector}
    pinecone_db.index.fetch.return_value = mock_response
    result = pinecone_db.get("id1")
    pinecone_db.index.fetch.assert_called_with(ids=["id1"], namespace="test_namespace")
    assert result is not None
    assert result.id == "id1"
    # metadata on the Vector becomes the payload on the result
    assert result.payload == {"name": "vector1"}
def test_delete_vector(pinecone_db):
    """delete() removes the given id from the configured namespace."""
    target_id = "id1"
    pinecone_db.delete(target_id)
    pinecone_db.index.delete.assert_called_with(ids=[target_id], namespace="test_namespace")
def test_get_vector_not_found(pinecone_db):
    """get() returns None when the fetch response contains no vectors."""
    empty_response = pinecone_db.index.fetch.return_value
    empty_response.vectors = {}
    assert pinecone_db.get("id1") is None
    pinecone_db.index.fetch.assert_called_with(ids=["id1"], namespace="test_namespace")
def test_list_cols(pinecone_db):
    """list_cols() delegates to the Pinecone client's index listing."""
    pinecone_db.list_cols()
    assert pinecone_db.client.list_indexes.called
def test_delete_col(pinecone_db):
    """delete_col() drops the backing index by its collection name."""
    expected_name = "test_index"
    pinecone_db.delete_col()
    pinecone_db.client.delete_index.assert_called_with(expected_name)
def test_col_info(pinecone_db):
    """col_info() asks the client to describe the backing index."""
    expected_name = "test_index"
    pinecone_db.col_info()
    pinecone_db.client.describe_index.assert_called_with(expected_name)
def test_count_with_namespace(pinecone_db):
    """count() reports the vector total of the configured namespace."""
    ns_stats = MagicMock(vector_count=10)
    pinecone_db.index.describe_index_stats.return_value = MagicMock(
        namespaces={"test_namespace": ns_stats}
    )
    assert pinecone_db.count() == 10
    pinecone_db.index.describe_index_stats.assert_called_once()
def test_count_without_namespace(pinecone_db):
    """With no namespace configured, count() falls back to the index total."""
    pinecone_db.namespace = None
    pinecone_db.index.describe_index_stats.return_value = MagicMock(total_vector_count=20)
    assert pinecone_db.count() == 20
    pinecone_db.index.describe_index_stats.assert_called_once()
def test_count_with_non_existent_namespace(pinecone_db):
    """count() is zero when the configured namespace has no stats entry."""
    stats = MagicMock(namespaces={"another_namespace": MagicMock(vector_count=5)})
    pinecone_db.index.describe_index_stats.return_value = stats
    assert pinecone_db.count() == 0
    pinecone_db.index.describe_index_stats.assert_called_once()
def test_count_with_none_vector_count(pinecone_db):
    """A namespace entry whose vector_count is None counts as zero."""
    stats = MagicMock(namespaces={"test_namespace": MagicMock(vector_count=None)})
    pinecone_db.index.describe_index_stats.return_value = stats
    assert pinecone_db.count() == 0
    pinecone_db.index.describe_index_stats.assert_called_once()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_azure_ai_search.py | tests/vector_stores/test_azure_ai_search.py | import json
from unittest.mock import MagicMock, Mock, patch
import pytest
from azure.core.exceptions import HttpResponseError
from mem0.configs.vector_stores.azure_ai_search import AzureAISearchConfig
# Import the AzureAISearch class and related models
from mem0.vector_stores.azure_ai_search import AzureAISearch
# Fixture to patch SearchClient and SearchIndexClient and create an instance of AzureAISearch.
@pytest.fixture
def mock_clients():
    """Patch the Azure Search SDK clients inside mem0 and yield
    (search_client, index_client, patched AzureKeyCredential class).

    The patches stay active for the whole test because the fixture yields
    inside the ``with`` block.
    """
    with (
        patch("mem0.vector_stores.azure_ai_search.SearchClient") as MockSearchClient,
        patch("mem0.vector_stores.azure_ai_search.SearchIndexClient") as MockIndexClient,
        patch("mem0.vector_stores.azure_ai_search.AzureKeyCredential") as MockAzureKeyCredential,
    ):
        # Create mocked instances for search and index clients.
        mock_search_client = MockSearchClient.return_value
        mock_index_client = MockIndexClient.return_value
        # Mock the client._client._config.user_agent_policy.add_user_agent
        mock_search_client._client = MagicMock()
        mock_search_client._client._config.user_agent_policy.add_user_agent = Mock()
        mock_index_client._client = MagicMock()
        mock_index_client._client._config.user_agent_policy.add_user_agent = Mock()
        # Stub required methods on search_client.
        mock_search_client.upload_documents = Mock()
        mock_search_client.upload_documents.return_value = [{"status": True, "id": "doc1"}]
        mock_search_client.search = Mock()
        mock_search_client.delete_documents = Mock()
        mock_search_client.delete_documents.return_value = [{"status": True, "id": "doc1"}]
        mock_search_client.merge_or_upload_documents = Mock()
        mock_search_client.merge_or_upload_documents.return_value = [{"status": True, "id": "doc1"}]
        mock_search_client.get_document = Mock()
        mock_search_client.close = Mock()
        # Stub required methods on index_client.
        mock_index_client.create_or_update_index = Mock()
        mock_index_client.list_indexes = Mock()
        # Empty by default, so constructors see a missing collection and create it.
        mock_index_client.list_index_names = Mock(return_value=[])
        mock_index_client.delete_index = Mock()
        # For col_info() we assume get_index returns an object with name and fields attributes.
        fake_index = Mock()
        fake_index.name = "test-index"
        fake_index.fields = ["id", "vector", "payload", "user_id", "run_id", "agent_id"]
        mock_index_client.get_index = Mock(return_value=fake_index)
        mock_index_client.close = Mock()
        yield mock_search_client, mock_index_client, MockAzureKeyCredential
@pytest.fixture
def azure_ai_search_instance(mock_clients):
    """Create an AzureAISearch wired to the patched clients (binary + float16)."""
    search_client, index_client, _ = mock_clients
    store = AzureAISearch(
        service_name="test-service",
        collection_name="test-index",
        api_key="test-api-key",
        embedding_model_dims=3,
        compression_type="binary",  # exercise the binary-quantization path
        use_float16=True,
    )
    return store, search_client, index_client
# --- Tests for AzureAISearchConfig ---
def test_config_validation_valid():
    """Test valid configurations are accepted."""
    # Minimal configuration falls back to defaults.
    cfg = AzureAISearchConfig(service_name="test-service", api_key="test-api-key", embedding_model_dims=768)
    assert cfg.collection_name == "mem0"  # default collection name
    assert cfg.service_name == "test-service"
    assert cfg.api_key == "test-api-key"
    assert cfg.embedding_model_dims == 768
    assert cfg.compression_type is None
    assert cfg.use_float16 is False
    # Fully specified configuration keeps every override.
    cfg = AzureAISearchConfig(
        collection_name="custom-index",
        service_name="test-service",
        api_key="test-api-key",
        embedding_model_dims=1536,
        compression_type="scalar",
        use_float16=True,
    )
    assert cfg.collection_name == "custom-index"
    assert cfg.compression_type == "scalar"
    assert cfg.use_float16 is True
def test_config_validation_invalid_compression_type():
    """Test that invalid compression types are rejected."""
    with pytest.raises(ValueError, match="Invalid compression_type"):
        AzureAISearchConfig(
            service_name="test-service",
            api_key="test-api-key",
            embedding_model_dims=768,
            compression_type="invalid-type",  # Not a valid option
        )
def test_config_validation_deprecated_use_compression():
    """Test that using the deprecated use_compression parameter raises an error."""
    with pytest.raises(ValueError) as err:
        AzureAISearchConfig(
            service_name="test-service",
            api_key="test-api-key",
            embedding_model_dims=768,
            use_compression=True,  # Deprecated parameter
        )
    # Partial matches, since the validator wraps the message with extra context.
    message = str(err.value)
    assert "use_compression" in message
    assert "no longer supported" in message
def test_config_validation_extra_fields():
    """Test that extra fields are rejected."""
    with pytest.raises(ValueError) as err:
        AzureAISearchConfig(
            service_name="test-service",
            api_key="test-api-key",
            embedding_model_dims=768,
            unknown_parameter="value",  # Extra field
        )
    message = str(err.value)
    assert "Extra fields not allowed" in message
    assert "unknown_parameter" in message
# --- Tests for AzureAISearch initialization ---
def test_initialization(mock_clients):
    """Test AzureAISearch initialization with different parameters."""
    mock_search_client, mock_index_client, mock_azure_key_credential = mock_clients
    # Test with minimal parameters
    instance = AzureAISearch(
        service_name="test-service", collection_name="test-index", api_key="test-api-key", embedding_model_dims=768
    )
    # Verify initialization parameters
    assert instance.index_name == "test-index"
    assert instance.collection_name == "test-index"
    assert instance.embedding_model_dims == 768
    assert instance.compression_type == "none"  # Default when None is passed
    assert instance.use_float16 is False
    # Verify client creation
    mock_azure_key_credential.assert_called_with("test-api-key")
    # call_args[0] is the positional-args tuple of add_user_agent; "mem0" must be among them
    assert "mem0" in mock_search_client._client._config.user_agent_policy.add_user_agent.call_args[0]
    assert "mem0" in mock_index_client._client._config.user_agent_policy.add_user_agent.call_args[0]
    # Verify index creation was called (list_index_names defaults to [] in the fixture)
    mock_index_client.create_or_update_index.assert_called_once()
def test_initialization_with_compression_types(mock_clients):
    """Test initialization with different compression types."""
    mock_search_client, mock_index_client, _ = mock_clients
    # Test with scalar compression
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="scalar-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        compression_type="scalar",
    )
    assert instance.compression_type == "scalar"
    # Capture the index creation call.
    # call_args_list[-1] always refers to the most recent construction, since the
    # shared mock accumulates calls across all three AzureAISearch instances below.
    args, _ = mock_index_client.create_or_update_index.call_args_list[-1]
    index = args[0]
    # Verify scalar compression was configured
    assert hasattr(index.vector_search, "compressions")
    assert len(index.vector_search.compressions) > 0
    assert "ScalarQuantizationCompression" in str(type(index.vector_search.compressions[0]))
    # Test with binary compression
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="binary-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        compression_type="binary",
    )
    assert instance.compression_type == "binary"
    # Capture the index creation call
    args, _ = mock_index_client.create_or_update_index.call_args_list[-1]
    index = args[0]
    # Verify binary compression was configured
    assert hasattr(index.vector_search, "compressions")
    assert len(index.vector_search.compressions) > 0
    assert "BinaryQuantizationCompression" in str(type(index.vector_search.compressions[0]))
    # Test with no compression
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="no-compression-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        compression_type=None,
    )
    assert instance.compression_type == "none"
    # Capture the index creation call
    args, _ = mock_index_client.create_or_update_index.call_args_list[-1]
    index = args[0]
    # Verify no compression was configured
    assert hasattr(index.vector_search, "compressions")
    assert len(index.vector_search.compressions) == 0
def test_initialization_with_float_precision(mock_clients):
    """Test initialization with different float precision settings."""
    mock_search_client, mock_index_client, _ = mock_clients
    # Test with half precision (float16)
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="float16-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        use_float16=True,
    )
    assert instance.use_float16 is True
    # Capture the index creation call made by the constructor just above
    args, _ = mock_index_client.create_or_update_index.call_args_list[-1]
    index = args[0]
    # Find the vector field and check its type ("Edm.Half" marks half precision)
    vector_field = next((f for f in index.fields if f.name == "vector"), None)
    assert vector_field is not None
    assert "Edm.Half" in vector_field.type
    # Test with full precision (float32)
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="float32-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        use_float16=False,
    )
    assert instance.use_float16 is False
    # Capture the index creation call
    args, _ = mock_index_client.create_or_update_index.call_args_list[-1]
    index = args[0]
    # Find the vector field and check its type ("Edm.Single" marks full precision)
    vector_field = next((f for f in index.fields if f.name == "vector"), None)
    assert vector_field is not None
    assert "Edm.Single" in vector_field.type
# --- Tests for create_col method ---
def test_create_col(azure_ai_search_instance):
    """Test the create_col method creates an index with the correct configuration."""
    instance, _, mock_index_client = azure_ai_search_instance
    # create_col is called during initialization, so we check the call that was already made
    mock_index_client.create_or_update_index.assert_called_once()
    # Verify the index configuration
    args, _ = mock_index_client.create_or_update_index.call_args
    index = args[0]
    # Check basic properties
    assert index.name == "test-index"
    assert len(index.fields) == 6  # id, user_id, run_id, agent_id, vector, payload
    # Check that required fields are present
    field_names = [f.name for f in index.fields]
    assert "id" in field_names
    assert "vector" in field_names
    assert "payload" in field_names
    assert "user_id" in field_names
    assert "run_id" in field_names
    assert "agent_id" in field_names
    # Check that id is the key field
    id_field = next(f for f in index.fields if f.name == "id")
    assert id_field.key is True
    # Check vector search configuration
    assert index.vector_search is not None
    assert len(index.vector_search.profiles) == 1
    assert index.vector_search.profiles[0].name == "my-vector-config"
    assert index.vector_search.profiles[0].algorithm_configuration_name == "my-algorithms-config"
    # Check algorithms
    assert len(index.vector_search.algorithms) == 1
    assert index.vector_search.algorithms[0].name == "my-algorithms-config"
    assert "HnswAlgorithmConfiguration" in str(type(index.vector_search.algorithms[0]))
    # With binary compression and float16 (as configured by the fixture),
    # we should have compression configuration
    assert len(index.vector_search.compressions) == 1
    assert index.vector_search.compressions[0].compression_name == "myCompression"
    assert "BinaryQuantizationCompression" in str(type(index.vector_search.compressions[0]))
def test_create_col_scalar_compression(mock_clients):
    """Test creating a collection with scalar compression."""
    _, index_client, _ = mock_clients
    AzureAISearch(
        service_name="test-service",
        collection_name="scalar-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        compression_type="scalar",
    )
    (created_index,), _ = index_client.create_or_update_index.call_args
    compressions = created_index.vector_search.compressions
    assert len(compressions) == 1
    assert compressions[0].compression_name == "myCompression"
    assert "ScalarQuantizationCompression" in str(type(compressions[0]))
    # The vector profile must reference the compression by name.
    assert created_index.vector_search.profiles[0].compression_name == "myCompression"
def test_create_col_no_compression(mock_clients):
    """Test creating a collection with no compression."""
    _, index_client, _ = mock_clients
    AzureAISearch(
        service_name="test-service",
        collection_name="no-compression-index",
        api_key="test-api-key",
        embedding_model_dims=768,
        compression_type=None,
    )
    (created_index,), _ = index_client.create_or_update_index.call_args
    vector_search = created_index.vector_search
    # No compression configured, and the profile does not reference one.
    assert len(vector_search.compressions) == 0
    assert vector_search.profiles[0].compression_name is None
# --- Tests for insert method ---
def test_insert_single(azure_ai_search_instance):
    """insert() uploads one document with JSON payload and promoted id fields."""
    store, search_client, _ = azure_ai_search_instance
    payload = {"user_id": "user1", "run_id": "run1", "agent_id": "agent1"}
    search_client.upload_documents.return_value = [{"status": True, "id": "doc1", "status_code": 201}]
    store.insert([[0.1, 0.2, 0.3]], [payload], ["doc1"])
    search_client.upload_documents.assert_called_once()
    (documents,), _ = search_client.upload_documents.call_args
    assert len(documents) == 1
    doc = documents[0]
    assert doc["id"] == "doc1"
    assert doc["vector"] == [0.1, 0.2, 0.3]
    assert doc["payload"] == json.dumps(payload)
    assert doc["user_id"] == "user1"
    assert doc["run_id"] == "run1"
    assert doc["agent_id"] == "agent1"
def test_insert_multiple(azure_ai_search_instance):
    """insert() uploads every document in a single batch call."""
    store, search_client, _ = azure_ai_search_instance
    count = 3
    vectors = [[float(i) / 10, float(i + 1) / 10, float(i + 2) / 10] for i in range(count)]
    payloads = [{"user_id": f"user{i}", "content": f"Test content {i}"} for i in range(count)]
    ids = [f"doc{i}" for i in range(count)]
    # All uploads succeed (status_code 201 marks success).
    search_client.upload_documents.return_value = [
        {"status": True, "id": doc_id, "status_code": 201} for doc_id in ids
    ]
    store.insert(vectors, payloads, ids)
    search_client.upload_documents.assert_called_once()
    (documents,), _ = search_client.upload_documents.call_args
    assert len(documents) == count
    # Spot-check the first and last documents, like the original scenario.
    for i in (0, count - 1):
        assert documents[i]["id"] == ids[i]
        assert documents[i]["vector"] == vectors[i]
        assert documents[i]["payload"] == json.dumps(payloads[i])
        assert documents[i]["user_id"] == f"user{i}"
def test_insert_with_error(azure_ai_search_instance):
    """Test insert when Azure returns an error for one or more documents."""
    instance, mock_search_client, _ = azure_ai_search_instance
    # Configure mock to return an error for one document
    mock_search_client.upload_documents.return_value = [{"status": False, "id": "doc1", "errorMessage": "Azure error"}]
    vectors = [[0.1, 0.2, 0.3]]
    payloads = [{"user_id": "user1"}]
    ids = ["doc1"]
    # Insert should raise an exception
    with pytest.raises(Exception) as exc_info:
        instance.insert(vectors, payloads, ids)
    assert "Insert failed for document doc1" in str(exc_info.value)
    # Configure mock to return mixed success/failure for multiple documents
    mock_search_client.upload_documents.return_value = [
        {"status": True, "id": "doc1"},  # This should not cause failure
        {"status": False, "id": "doc2", "errorMessage": "Azure error"},
    ]
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [{"user_id": "user1"}, {"user_id": "user2"}]
    ids = ["doc1", "doc2"]
    # Insert should raise an exception, but now check for doc2 failure
    # NOTE(review): the implementation may report either document first,
    # so the assertion below accepts both ids.
    with pytest.raises(Exception) as exc_info:
        instance.insert(vectors, payloads, ids)
    assert "Insert failed for document doc2" in str(exc_info.value) or "Insert failed for document doc1" in str(
        exc_info.value
    )
def test_insert_with_missing_payload_fields(azure_ai_search_instance):
    """Payloads lacking user/run/agent ids should not produce those columns."""
    store, search_client, _ = azure_ai_search_instance
    payload = {"content": "Some content without user_id, run_id, or agent_id"}
    # Simulate a successful upload response (status_code 201).
    search_client.upload_documents.return_value = [{"id": "doc1", "status_code": 201}]
    store.insert([[0.1, 0.2, 0.3]], [payload], ["doc1"])
    search_client.upload_documents.assert_called_once()
    (documents,), _ = search_client.upload_documents.call_args
    assert len(documents) == 1
    doc = documents[0]
    assert doc["id"] == "doc1"
    assert doc["vector"] == [0.1, 0.2, 0.3]
    assert doc["payload"] == json.dumps(payload)
    for absent_key in ("user_id", "run_id", "agent_id"):
        assert absent_key not in doc
def test_insert_with_http_error(azure_ai_search_instance):
    """HTTP failures raised by the Azure SDK propagate out of insert()."""
    store, search_client, _ = azure_ai_search_instance
    search_client.upload_documents.side_effect = HttpResponseError("Azure service error")
    with pytest.raises(HttpResponseError, match="Azure service error"):
        store.insert([[0.1, 0.2, 0.3]], [{"user_id": "user1"}], ["doc1"])
# --- Tests for search method ---
def test_search_basic(azure_ai_search_instance):
    """Test basic vector search without filters."""
    instance, mock_search_client, _ = azure_ai_search_instance
    # Ensure instance has a default vector_filter_mode
    instance.vector_filter_mode = "preFilter"
    # Configure mock to return search results
    mock_search_client.search.return_value = [
        {
            "id": "doc1",
            "@search.score": 0.95,
            # payload is stored as a JSON string and decoded by search()
            "payload": json.dumps({"content": "Test content"}),
        }
    ]
    # Search with a vector
    query_text = "test query"  # Add a query string
    query_vector = [0.1, 0.2, 0.3]
    results = instance.search(query_text, query_vector, limit=5)  # Pass the query string
    # Verify search was called correctly
    mock_search_client.search.assert_called_once()
    _, kwargs = mock_search_client.search.call_args
    # Check parameters (vector_queries carries objects with vector/k/fields attributes)
    assert len(kwargs["vector_queries"]) == 1
    assert kwargs["vector_queries"][0].vector == query_vector
    assert kwargs["vector_queries"][0].k_nearest_neighbors == 5
    assert kwargs["vector_queries"][0].fields == "vector"
    assert kwargs["filter"] is None  # No filters
    assert kwargs["top"] == 5
    assert kwargs["vector_filter_mode"] == "preFilter"  # Now correctly set
    # Check results
    assert len(results) == 1
    assert results[0].id == "doc1"
    assert results[0].score == 0.95
    assert results[0].payload == {"content": "Test content"}
def test_init_with_valid_api_key(mock_clients):
    """Test __init__ with a valid API key and all required parameters."""
    mock_search_client, mock_index_client, mock_azure_key_credential = mock_clients
    instance = AzureAISearch(
        service_name="test-service",
        collection_name="test-index",
        api_key="test-api-key",
        embedding_model_dims=128,
        compression_type="scalar",
        use_float16=True,
        hybrid_search=True,
        vector_filter_mode="preFilter",
    )
    # Check attributes
    assert instance.service_name == "test-service"
    assert instance.api_key == "test-api-key"
    assert instance.index_name == "test-index"
    assert instance.collection_name == "test-index"
    assert instance.embedding_model_dims == 128
    assert instance.compression_type == "scalar"
    assert instance.use_float16 is True
    assert instance.hybrid_search is True
    assert instance.vector_filter_mode == "preFilter"
    # Check that AzureKeyCredential was used
    mock_azure_key_credential.assert_called_with("test-api-key")
    # Check that user agent was set
    mock_search_client._client._config.user_agent_policy.add_user_agent.assert_called_with("mem0")
    mock_index_client._client._config.user_agent_policy.add_user_agent.assert_called_with("mem0")
    # Check that create_col was called if collection does not exist
    # (the fixture's list_index_names defaults to [], so the collection is missing)
    mock_index_client.create_or_update_index.assert_called_once()
def test_init_with_default_api_key_triggers_default_credential(monkeypatch, mock_clients):
    """Test __init__ uses DefaultAzureCredential if api_key is None or placeholder."""
    mock_search_client, mock_index_client, mock_azure_key_credential = mock_clients
    # Patch DefaultAzureCredential to a mock so we can check if it's called
    with patch("mem0.vector_stores.azure_ai_search.DefaultAzureCredential") as mock_default_cred:
        # Test with api_key=None
        AzureAISearch(
            service_name="test-service",
            collection_name="test-index",
            api_key=None,
            embedding_model_dims=64,
        )
        mock_default_cred.assert_called_once()
        # Test with api_key=""
        AzureAISearch(
            service_name="test-service",
            collection_name="test-index",
            api_key="",
            embedding_model_dims=64,
        )
        assert mock_default_cred.call_count == 2
        # Test with api_key="your-api-key" (a template placeholder, per the docstring,
        # which is treated the same as no key at all)
        AzureAISearch(
            service_name="test-service",
            collection_name="test-index",
            api_key="your-api-key",
            embedding_model_dims=64,
        )
        assert mock_default_cred.call_count == 3
def test_init_sets_compression_type_to_none_if_unspecified(mock_clients):
    """Omitting compression_type defaults it to the literal string 'none'."""
    store = AzureAISearch(
        service_name="test-service",
        collection_name="test-index",
        api_key="test-api-key",
        embedding_model_dims=32,
    )
    assert store.compression_type == "none"
def test_init_does_not_create_col_if_collection_exists(mock_clients):
    """No index creation happens when the collection name is already present."""
    _, index_client, _ = mock_clients
    # Simulate that the collection already exists on the service.
    index_client.list_index_names.return_value = ["test-index"]
    AzureAISearch(
        service_name="test-service",
        collection_name="test-index",
        api_key="test-api-key",
        embedding_model_dims=16,
    )
    index_client.create_or_update_index.assert_not_called()
def test_init_calls_create_col_if_collection_missing(mock_clients):
    """A missing collection triggers exactly one index creation at init time."""
    _, index_client, _ = mock_clients
    # Simulate that no collections exist on the service.
    index_client.list_index_names.return_value = []
    AzureAISearch(
        service_name="test-service",
        collection_name="missing-index",
        api_key="test-api-key",
        embedding_model_dims=16,
    )
    index_client.create_or_update_index.assert_called_once()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_azure_mysql.py | tests/vector_stores/test_azure_mysql.py | import json
import pytest
from unittest.mock import Mock, patch
from mem0.vector_stores.azure_mysql import AzureMySQL, OutputData
@pytest.fixture
def mock_connection_pool():
    """Build a stubbed DB pool: pool.connection() -> connection -> cursor."""
    cursor = Mock()
    cursor.fetchall = Mock(return_value=[])
    cursor.fetchone = Mock(return_value=None)
    cursor.execute = Mock()
    cursor.executemany = Mock()
    cursor.close = Mock()
    conn = Mock()
    conn.cursor = Mock(return_value=cursor)
    conn.commit = Mock()
    conn.rollback = Mock()
    conn.close = Mock()
    pool = Mock()
    pool.connection = Mock(return_value=conn)
    pool.close = Mock()
    return pool
@pytest.fixture
def azure_mysql_instance(mock_connection_pool):
    """Create an AzureMySQL store whose connection pool is fully mocked."""
    with patch('mem0.vector_stores.azure_mysql.PooledDB') as pooled_db_cls:
        pooled_db_cls.return_value = mock_connection_pool
        store = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password="testpass",
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
            use_azure_credential=False,
            ssl_disabled=True,
        )
        store.connection_pool = mock_connection_pool
        return store
def test_azure_mysql_init(mock_connection_pool):
    """The constructor should store connection settings verbatim."""
    with patch('mem0.vector_stores.azure_mysql.PooledDB') as pooled_db_cls:
        pooled_db_cls.return_value = mock_connection_pool
        store = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password="testpass",
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
        )
        assert store.host == "test-server.mysql.database.azure.com"
        assert store.port == 3306
        assert store.user == "testuser"
        assert store.database == "testdb"
        assert store.collection_name == "test_collection"
        assert store.embedding_model_dims == 128
def test_create_col(azure_mysql_instance):
    """create_col() should issue DDL through a pooled cursor."""
    azure_mysql_instance.create_col(name="new_collection", vector_size=256)
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_insert(azure_mysql_instance):
    """insert() should batch the rows through executemany."""
    azure_mysql_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
        ids=["id1", "id2"],
    )
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.executemany.called
def test_search(azure_mysql_instance):
    """search() should run a query and return a list of results."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    rows = [
        {"id": "id1", "vector": json.dumps([0.1, 0.2, 0.3]), "payload": json.dumps({"text": "test1"})},
        {"id": "id2", "vector": json.dumps([0.4, 0.5, 0.6]), "payload": json.dumps({"text": "test2"})},
    ]
    cursor.fetchall = Mock(return_value=rows)
    results = azure_mysql_instance.search(query="test", vectors=[0.2, 0.3, 0.4], limit=5)
    assert isinstance(results, list)
    assert cursor.execute.called
def test_delete(azure_mysql_instance):
    """delete() should execute a statement against the collection."""
    azure_mysql_instance.delete(vector_id="test_id")
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_update(azure_mysql_instance):
    """update() should execute an UPDATE with the new vector and payload."""
    azure_mysql_instance.update(vector_id="test_id", vector=[0.7, 0.8, 0.9], payload={"text": "updated"})
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_get(azure_mysql_instance):
    """get() should return an OutputData record for an existing row."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchone = Mock(
        return_value={
            "id": "test_id",
            "vector": json.dumps([0.1, 0.2, 0.3]),
            "payload": json.dumps({"text": "test"}),
        }
    )
    record = azure_mysql_instance.get(vector_id="test_id")
    assert record is not None
    assert isinstance(record, OutputData)
    assert record.id == "test_id"
def test_list_cols(azure_mysql_instance):
    """list_cols() should return the table names reported by MySQL."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchall = Mock(
        return_value=[{"Tables_in_testdb": "collection1"}, {"Tables_in_testdb": "collection2"}]
    )
    tables = azure_mysql_instance.list_cols()
    assert isinstance(tables, list)
    assert len(tables) == 2
def test_delete_col(azure_mysql_instance):
    """delete_col() should drop the collection table."""
    azure_mysql_instance.delete_col()
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    assert cursor.execute.called
def test_col_info(azure_mysql_instance):
    """Collection info should come back as a dict built from a DB query."""
    # Stub the summary row the database would report for the collection.
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    cursor.fetchone = Mock(return_value={
        'name': 'test_collection',
        'count': 100,
        'size_mb': 1.5
    })
    details = azure_mysql_instance.col_info()
    assert isinstance(details, dict)
    assert cursor.execute.called
def test_list(azure_mysql_instance):
    """Listing vectors should surface the rows returned by the database."""
    cursor = azure_mysql_instance.connection_pool.connection().cursor()
    # One stored row with JSON-encoded vector and payload columns.
    cursor.fetchall = Mock(return_value=[
        {
            'id': 'id1',
            'vector': json.dumps([0.1, 0.2, 0.3]),
            'payload': json.dumps({"text": "test1"})
        },
    ])
    listed = azure_mysql_instance.list(limit=10)
    assert isinstance(listed, list)
    assert len(listed) > 0
def test_reset(azure_mysql_instance):
"""Test resetting the collection."""
azure_mysql_instance.reset()
conn = azure_mysql_instance.connection_pool.connection()
cursor = conn.cursor()
# Should call execute at least twice (drop and create)
assert cursor.execute.call_count >= 2
@pytest.mark.skipif(True, reason="Requires Azure credentials")
def test_azure_credential_authentication():
    """When use_azure_credential is set, the AAD token becomes the password."""
    with patch('mem0.vector_stores.azure_mysql.DefaultAzureCredential') as mock_cred:
        token = Mock()
        token.token = "test_token"
        mock_cred.return_value.get_token.return_value = token
        store = AzureMySQL(
            host="test-server.mysql.database.azure.com",
            port=3306,
            user="testuser",
            password=None,
            database="testdb",
            collection_name="test_collection",
            embedding_model_dims=128,
            use_azure_credential=True,
        )
        # The fetched credential token should be used as the DB password.
        assert store.password == "test_token"
def test_output_data_model():
    """OutputData should retain exactly the fields it was constructed with."""
    record = OutputData(id="test_id", score=0.95, payload={"text": "test"})
    assert record.id == "test_id"
    assert record.score == 0.95
    assert record.payload == {"text": "test"}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_weaviate.py | tests/vector_stores/test_weaviate.py | import os
import uuid
import httpx
import unittest
from unittest.mock import MagicMock, patch
import dotenv
import weaviate
from weaviate.exceptions import UnexpectedStatusCodeException
from mem0.vector_stores.weaviate import Weaviate
class TestWeaviateDB(unittest.TestCase):
    """Unit tests for the Weaviate vector-store wrapper.

    The Weaviate client is fully mocked; no network access is performed.
    """
    @classmethod
    def setUpClass(cls):
        # Snapshot the env vars we overwrite so tearDownClass can restore them.
        dotenv.load_dotenv()
        cls.original_env = {
            "WEAVIATE_CLUSTER_URL": os.getenv("WEAVIATE_CLUSTER_URL", "http://localhost:8080"),
            "WEAVIATE_API_KEY": os.getenv("WEAVIATE_API_KEY", "test_api_key"),
        }
        os.environ["WEAVIATE_CLUSTER_URL"] = "http://localhost:8080"
        os.environ["WEAVIATE_API_KEY"] = "test_api_key"
    def setUp(self):
        """Create a Weaviate wrapper whose underlying client is a MagicMock."""
        self.client_mock = MagicMock(spec=weaviate.WeaviateClient)
        self.client_mock.collections = MagicMock()
        self.client_mock.collections.exists.return_value = False
        self.client_mock.collections.create.return_value = None
        self.client_mock.collections.delete.return_value = None
        # Intercept the local-connection factory so the wrapper gets our mock.
        patcher = patch("mem0.vector_stores.weaviate.weaviate.connect_to_local", return_value=self.client_mock)
        self.mock_weaviate = patcher.start()
        self.addCleanup(patcher.stop)
        self.weaviate_db = Weaviate(
            collection_name="test_collection",
            embedding_model_dims=1536,
            cluster_url=os.getenv("WEAVIATE_CLUSTER_URL"),
            auth_client_secret=os.getenv("WEAVIATE_API_KEY"),
            additional_headers={"X-OpenAI-Api-Key": "test_key"},
        )
        # Drop calls recorded during construction so tests see a clean slate.
        self.client_mock.reset_mock()
    @classmethod
    def tearDownClass(cls):
        # Restore (or remove) the env vars overwritten in setUpClass.
        for key, value in cls.original_env.items():
            if value is not None:
                os.environ[key] = value
            else:
                os.environ.pop(key, None)
    def tearDown(self):
        self.client_mock.reset_mock()
    def test_create_col(self):
        """create_col creates the collection only when it does not exist yet."""
        self.client_mock.collections.exists.return_value = False
        self.weaviate_db.create_col(vector_size=1536)
        self.client_mock.collections.create.assert_called_once()
        self.client_mock.reset_mock()
        # Second pass: the collection exists, so no create call is made.
        self.client_mock.collections.exists.return_value = True
        self.weaviate_db.create_col(vector_size=1536)
        self.client_mock.collections.create.assert_not_called()
    def test_insert(self):
        """insert runs without error when batch insertion is mocked."""
        self.client_mock.batch = MagicMock()
        self.client_mock.batch.fixed_size.return_value.__enter__.return_value = MagicMock()
        self.client_mock.collections.get.return_value.data.insert_many.return_value = {
            "results": [{"id": "id1"}, {"id": "id2"}]
        }
        vectors = [[0.1] * 1536, [0.2] * 1536]
        payloads = [{"key1": "value1"}, {"key2": "value2"}]
        ids = [str(uuid.uuid4()), str(uuid.uuid4())]
        # NOTE(review): no assertions here — this only checks insert() does not raise.
        self.weaviate_db.insert(vectors=vectors, payloads=payloads, ids=ids)
    def test_get(self):
        """get returns the object's properties plus its id in the payload."""
        valid_uuid = str(uuid.uuid4())
        mock_response = MagicMock()
        mock_response.properties = {
            "hash": "abc123",
            "created_at": "2025-03-08T12:00:00Z",
            "updated_at": "2025-03-08T13:00:00Z",
            "user_id": "user_123",
            "agent_id": "agent_456",
            "run_id": "run_789",
            "data": {"key": "value"},
            "category": "test",
        }
        mock_response.uuid = valid_uuid
        self.client_mock.collections.get.return_value.query.fetch_object_by_id.return_value = mock_response
        result = self.weaviate_db.get(vector_id=valid_uuid)
        assert result.id == valid_uuid
        # The payload should mirror the properties with the id folded in.
        expected_payload = mock_response.properties.copy()
        expected_payload["id"] = valid_uuid
        assert result.payload == expected_payload
    def test_get_not_found(self):
        """Arrange a 404 from the client for a missing vector id."""
        # NOTE(review): this test only configures the side effect; it never calls
        # self.weaviate_db.get(...) nor asserts anything — looks incomplete, confirm intent.
        mock_response = httpx.Response(status_code=404, json={"error": "Not found"})
        self.client_mock.collections.get.return_value.data.get_by_id.side_effect = UnexpectedStatusCodeException(
            "Not found", mock_response
        )
    def test_search(self):
        """search maps hybrid-query hits into scored results."""
        mock_objects = [{"uuid": "id1", "properties": {"key1": "value1"}, "metadata": {"distance": 0.2}}]
        mock_response = MagicMock()
        mock_response.objects = []
        for obj in mock_objects:
            mock_obj = MagicMock()
            mock_obj.uuid = obj["uuid"]
            mock_obj.properties = obj["properties"]
            mock_obj.metadata = MagicMock()
            mock_obj.metadata.distance = obj["metadata"]["distance"]
            mock_response.objects.append(mock_obj)
        mock_hybrid = MagicMock()
        self.client_mock.collections.get.return_value.query.hybrid = mock_hybrid
        mock_hybrid.return_value = mock_response
        vectors = [[0.1] * 1536]
        results = self.weaviate_db.search(query="", vectors=vectors, limit=5)
        mock_hybrid.assert_called_once()
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, "id1")
        # Score appears to be derived from distance (0.2 -> 0.8, presumably 1 - distance).
        self.assertEqual(results[0].score, 0.8)
    def test_delete(self):
        """delete forwards the id to the client's delete_by_id."""
        self.weaviate_db.delete(vector_id="id1")
        self.client_mock.collections.get.return_value.data.delete_by_id.assert_called_once_with("id1")
    def test_list(self):
        """list returns fetched objects wrapped in a single-element outer list."""
        mock_objects = []
        mock_obj1 = MagicMock()
        mock_obj1.uuid = "id1"
        mock_obj1.properties = {"key1": "value1"}
        mock_objects.append(mock_obj1)
        mock_obj2 = MagicMock()
        mock_obj2.uuid = "id2"
        mock_obj2.properties = {"key2": "value2"}
        mock_objects.append(mock_obj2)
        mock_response = MagicMock()
        mock_response.objects = mock_objects
        mock_fetch = MagicMock()
        self.client_mock.collections.get.return_value.query.fetch_objects = mock_fetch
        mock_fetch.return_value = mock_response
        results = self.weaviate_db.list(limit=10)
        mock_fetch.assert_called_once()
        # Verify results
        self.assertEqual(len(results), 1)
        self.assertEqual(len(results[0]), 2)
        self.assertEqual(results[0][0].id, "id1")
        self.assertEqual(results[0][0].payload["key1"], "value1")
        self.assertEqual(results[0][1].id, "id2")
        self.assertEqual(results[0][1].payload["key2"], "value2")
    def test_list_cols(self):
        """list_cols reshapes client collection objects into name dicts."""
        mock_collection1 = MagicMock()
        mock_collection1.name = "collection1"
        mock_collection2 = MagicMock()
        mock_collection2.name = "collection2"
        self.client_mock.collections.list_all.return_value = [mock_collection1, mock_collection2]
        result = self.weaviate_db.list_cols()
        expected = {"collections": [{"name": "collection1"}, {"name": "collection2"}]}
        assert result == expected
        self.client_mock.collections.list_all.assert_called_once()
    def test_delete_col(self):
        """delete_col drops the configured collection by name."""
        self.weaviate_db.delete_col()
        self.client_mock.collections.delete.assert_called_once_with("test_collection")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_s3_vectors.py | tests/vector_stores/test_s3_vectors.py | from mem0.configs.vector_stores.s3_vectors import S3VectorsConfig
import pytest
from botocore.exceptions import ClientError
from mem0.memory.main import Memory
from mem0.vector_stores.s3_vectors import S3Vectors
# Shared fixture constants for the S3 Vectors store under test.
BUCKET_NAME = "test-bucket"
INDEX_NAME = "test-index"
EMBEDDING_DIMS = 1536  # embedding width passed to the store
REGION = "us-east-1"
@pytest.fixture
def mock_boto_client(mocker):
    """Replace boto3.client with a MagicMock and hand the mock to the test."""
    client = mocker.MagicMock()
    mocker.patch("boto3.client", return_value=client)
    return client
@pytest.fixture
def mock_embedder(mocker):
    """Patch EmbedderFactory.create so embeds yield a fixed small vector."""
    factory = mocker.MagicMock()
    factory.return_value.embed.return_value = [0.1, 0.2, 0.3]
    mocker.patch("mem0.utils.factory.EmbedderFactory.create", factory)
    return factory
@pytest.fixture
def mock_llm(mocker):
    """Patch the LLM factory and SQLite storage so Memory can build offline."""
    llm = mocker.MagicMock()
    mocker.patch("mem0.utils.factory.LlmFactory.create", llm)
    mocker.patch("mem0.memory.storage.SQLiteManager", mocker.MagicMock())
    return llm
def test_initialization_creates_resources(mock_boto_client):
    """Missing bucket and index should both be provisioned at construction."""
    missing = ClientError(
        {"Error": {"Code": "NotFoundException"}}, "OperationName"
    )
    mock_boto_client.get_vector_bucket.side_effect = missing
    mock_boto_client.get_index.side_effect = missing
    S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
        region_name=REGION,
    )
    # Both the bucket and the index should have been created exactly once.
    mock_boto_client.create_vector_bucket.assert_called_once_with(
        vectorBucketName=BUCKET_NAME
    )
    mock_boto_client.create_index.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        dataType="float32",
        dimension=EMBEDDING_DIMS,
        distanceMetric="cosine",
    )
def test_initialization_uses_existing_resources(mock_boto_client):
    """Existing bucket and index should be reused rather than re-created."""
    # Both lookups succeed, so nothing should be provisioned.
    mock_boto_client.get_vector_bucket.return_value = {}
    mock_boto_client.get_index.return_value = {}
    S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
        region_name=REGION,
    )
    mock_boto_client.create_vector_bucket.assert_not_called()
    mock_boto_client.create_index.assert_not_called()
def test_memory_initialization_with_config(mock_boto_client, mock_llm, mock_embedder):
    """Memory.from_config should wire up an S3Vectors store without errors."""
    mock_boto_client.get_vector_bucket.return_value = {}
    mock_boto_client.get_index.return_value = {}
    vector_store_cfg = {
        "provider": "s3_vectors",
        "config": {
            "vector_bucket_name": BUCKET_NAME,
            "collection_name": INDEX_NAME,
            "embedding_model_dims": EMBEDDING_DIMS,
            "distance_metric": "cosine",
            "region_name": REGION,
        },
    }
    try:
        memory = Memory.from_config({"vector_store": vector_store_cfg})
        assert memory.vector_store is not None
        assert isinstance(memory.vector_store, S3Vectors)
        assert isinstance(memory.config.vector_store.config, S3VectorsConfig)
    except AttributeError:
        # An AttributeError here means the store/config wiring regressed.
        pytest.fail("Memory initialization failed")
def test_insert(mock_boto_client):
    """insert should forward all vectors in one put_vectors call."""
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    store.insert(
        [[0.1, 0.2], [0.3, 0.4]],
        [{"meta": "data1"}, {"meta": "data2"}],
        ["id1", "id2"],
    )
    # Each (id, vector, payload) triple maps to one S3 Vectors record.
    expected_records = [
        {
            "key": "id1",
            "data": {"float32": [0.1, 0.2]},
            "metadata": {"meta": "data1"},
        },
        {
            "key": "id2",
            "data": {"float32": [0.3, 0.4]},
            "metadata": {"meta": "data2"},
        },
    ]
    mock_boto_client.put_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        vectors=expected_records,
    )
def test_search(mock_boto_client):
    """search should delegate to query_vectors and map hits to results."""
    mock_boto_client.query_vectors.return_value = {
        "vectors": [{"key": "id1", "distance": 0.9, "metadata": {"meta": "data1"}}]
    }
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    hits = store.search(query="test", vectors=[0.1, 0.2], limit=1)
    mock_boto_client.query_vectors.assert_called_once()
    assert len(hits) == 1
    assert hits[0].id == "id1"
    assert hits[0].score == 0.9
def test_get(mock_boto_client):
    """get should fetch metadata only and surface it as the payload."""
    mock_boto_client.get_vectors.return_value = {
        "vectors": [{"key": "id1", "metadata": {"meta": "data1"}}]
    }
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    record = store.get("id1")
    # The store requests metadata but not the raw vector data.
    mock_boto_client.get_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        keys=["id1"],
        returnData=False,
        returnMetadata=True,
    )
    assert record.id == "id1"
    assert record.payload["meta"] == "data1"
def test_delete(mock_boto_client):
    """delete should remove exactly the requested key from the index."""
    vector_store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    vector_store.delete("id1")
    mock_boto_client.delete_vectors.assert_called_once_with(
        vectorBucketName=BUCKET_NAME,
        indexName=INDEX_NAME,
        keys=["id1"],
    )
def test_reset(mock_boto_client):
    """reset should delete the index and then recreate it."""
    # The index is missing at construction time, forcing an initial create.
    mock_boto_client.get_index.side_effect = ClientError(
        {"Error": {"Code": "NotFoundException"}}, "OperationName"
    )
    store = S3Vectors(
        vector_bucket_name=BUCKET_NAME,
        collection_name=INDEX_NAME,
        embedding_model_dims=EMBEDDING_DIMS,
    )
    # Exactly one create happens during initialization.
    assert mock_boto_client.create_index.call_count == 1
    store.reset()
    mock_boto_client.delete_index.assert_called_once_with(
        vectorBucketName=BUCKET_NAME, indexName=INDEX_NAME
    )
    # One create at init plus one after the reset.
    assert mock_boto_client.create_index.call_count == 2
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_opensearch.py | tests/vector_stores/test_opensearch.py | import os
import threading
import unittest
from unittest.mock import MagicMock, patch
import dotenv
try:
from opensearchpy import AWSV4SignerAuth, OpenSearch
except ImportError:
raise ImportError("OpenSearch requires extra dependencies. Install with `pip install opensearch-py`") from None
from mem0 import Memory
from mem0.configs.base import MemoryConfig
from mem0.vector_stores.opensearch import OpenSearchDB
# Mock classes for testing OpenSearch with AWS authentication
class MockFieldInfo:
    """Mock pydantic field info."""
    def __init__(self, default=None):
        # Mirrors pydantic's FieldInfo.default attribute, which
        # _safe_deepcopy_config reads when rebuilding a config.
        self.default = default
class MockOpenSearchConfig:
    """Mimics mem0's pydantic OpenSearch config for deepcopy tests.

    Exposes a pydantic-style ``model_fields`` mapping plus plain instance
    attributes, optionally carrying auth objects that refuse deep-copying.
    """
    # Field registry with per-field defaults, as pydantic would expose it.
    model_fields = {
        'collection_name': MockFieldInfo(default="default_collection"),
        'host': MockFieldInfo(default="localhost"),
        'port': MockFieldInfo(default=9200),
        'embedding_model_dims': MockFieldInfo(default=1536),
        'http_auth': MockFieldInfo(default=None),
        'auth': MockFieldInfo(default=None),
        'credentials': MockFieldInfo(default=None),
        'connection_class': MockFieldInfo(default=None),
        'use_ssl': MockFieldInfo(default=False),
        'verify_certs': MockFieldInfo(default=False),
    }
    def __init__(self, collection_name="test_collection", include_auth=True, **kwargs):
        """Build a config; auth fields are populated unless disabled.

        Args:
            collection_name: Index/collection name to report.
            include_auth: When True (and no explicit auth kwargs are given),
                attach AWS-style auth objects that cannot be deep-copied.
            **kwargs: Optional overrides for host/port/dims/SSL settings and
                the four auth-related fields.
        """
        self.collection_name = collection_name
        self.host = kwargs.get("host", "localhost")
        self.port = kwargs.get("port", 9200)
        self.embedding_model_dims = kwargs.get("embedding_model_dims", 1536)
        self.use_ssl = kwargs.get("use_ssl", True)
        self.verify_certs = kwargs.get("verify_certs", True)
        # Explicit auth kwargs take precedence over the include_auth flag.
        if any(field in kwargs for field in ["http_auth", "auth", "credentials", "connection_class"]):
            self.http_auth = kwargs.get("http_auth")
            self.auth = kwargs.get("auth")
            self.credentials = kwargs.get("credentials")
            self.connection_class = kwargs.get("connection_class")
        elif include_auth:
            # These objects raise TypeError from __deepcopy__, mimicking real AWS auth.
            self.http_auth = MockAWSAuth()
            self.auth = MockAWSAuth()
            self.credentials = {"key": "value"}
            self.connection_class = MockConnectionClass()
        else:
            self.http_auth = None
            self.auth = None
            self.credentials = None
            self.connection_class = None
class MockAWSAuth:
    """Stand-in for an AWS request signer that cannot be deep-copied."""

    def __init__(self):
        self.region = "us-east-1"
        self._lock = threading.Lock()

    def __deepcopy__(self, memo):
        # Simulate the real signer, whose internal lock breaks copy.deepcopy.
        raise TypeError("cannot pickle '_thread.lock' object")
class MockConnectionClass:
    """Stand-in connection class whose internal state blocks deep-copying."""

    def __init__(self):
        self._state = {"connected": False}

    def __deepcopy__(self, memo):
        # Mimic connection objects that refuse to be deep-copied.
        raise TypeError("cannot pickle connection state")
class TestOpenSearchDB(unittest.TestCase):
    """Unit tests for the OpenSearch vector-store wrapper with a mocked client."""
    @classmethod
    def setUpClass(cls):
        # Snapshot env vars so tearDownClass can restore them afterwards.
        dotenv.load_dotenv()
        cls.original_env = {
            "OS_URL": os.getenv("OS_URL", "http://localhost:9200"),
            "OS_USERNAME": os.getenv("OS_USERNAME", "test_user"),
            "OS_PASSWORD": os.getenv("OS_PASSWORD", "test_password"),
        }
        os.environ["OS_URL"] = "http://localhost"
        os.environ["OS_USERNAME"] = "test_user"
        os.environ["OS_PASSWORD"] = "test_password"
    def setUp(self):
        """Build an OpenSearchDB whose OpenSearch client is fully mocked."""
        self.client_mock = MagicMock(spec=OpenSearch)
        self.client_mock.indices = MagicMock()
        self.client_mock.indices.exists = MagicMock(return_value=False)
        self.client_mock.indices.create = MagicMock()
        self.client_mock.indices.delete = MagicMock()
        self.client_mock.indices.get_alias = MagicMock()
        self.client_mock.indices.refresh = MagicMock()
        self.client_mock.get = MagicMock()
        self.client_mock.update = MagicMock()
        self.client_mock.delete = MagicMock()
        self.client_mock.search = MagicMock()
        self.client_mock.index = MagicMock(return_value={"_id": "doc1"})
        # Intercept the client constructor so the wrapper receives our mock.
        patcher = patch("mem0.vector_stores.opensearch.OpenSearch", return_value=self.client_mock)
        self.mock_os = patcher.start()
        self.addCleanup(patcher.stop)
        self.os_db = OpenSearchDB(
            host=os.getenv("OS_URL"),
            port=9200,
            collection_name="test_collection",
            embedding_model_dims=1536,
            user=os.getenv("OS_USERNAME"),
            password=os.getenv("OS_PASSWORD"),
            verify_certs=False,
            use_ssl=False,
        )
        # Discard calls made during construction so each test starts clean.
        self.client_mock.reset_mock()
    @classmethod
    def tearDownClass(cls):
        # Restore (or remove) the env vars overwritten in setUpClass.
        for key, value in cls.original_env.items():
            if value is not None:
                os.environ[key] = value
            else:
                os.environ.pop(key, None)
    def tearDown(self):
        self.client_mock.reset_mock()
    def test_create_index(self):
        """create_index builds a knn_vector mapping and skips existing indices."""
        self.client_mock.indices.exists.return_value = False
        self.os_db.create_index()
        self.client_mock.indices.create.assert_called_once()
        create_args = self.client_mock.indices.create.call_args[1]
        self.assertEqual(create_args["index"], "test_collection")
        mappings = create_args["body"]["mappings"]["properties"]
        self.assertEqual(mappings["vector_field"]["type"], "knn_vector")
        self.assertEqual(mappings["vector_field"]["dimension"], 1536)
        self.client_mock.reset_mock()
        # When the index already exists, no create call should be issued.
        self.client_mock.indices.exists.return_value = True
        self.os_db.create_index()
        self.client_mock.indices.create.assert_not_called()
    def test_insert(self):
        """insert indexes each vector individually and echoes back OutputData."""
        vectors = [[0.1] * 1536, [0.2] * 1536]
        payloads = [{"key1": "value1"}, {"key2": "value2"}]
        ids = ["id1", "id2"]
        # Mock the index method
        self.client_mock.index = MagicMock()
        results = self.os_db.insert(vectors=vectors, payloads=payloads, ids=ids)
        # Verify index was called twice (once for each vector)
        self.assertEqual(self.client_mock.index.call_count, 2)
        # Check first call
        first_call = self.client_mock.index.call_args_list[0]
        self.assertEqual(first_call[1]["index"], "test_collection")
        self.assertEqual(first_call[1]["body"]["vector_field"], vectors[0])
        self.assertEqual(first_call[1]["body"]["payload"], payloads[0])
        self.assertEqual(first_call[1]["body"]["id"], ids[0])
        # Check second call
        second_call = self.client_mock.index.call_args_list[1]
        self.assertEqual(second_call[1]["index"], "test_collection")
        self.assertEqual(second_call[1]["body"]["vector_field"], vectors[1])
        self.assertEqual(second_call[1]["body"]["payload"], payloads[1])
        self.assertEqual(second_call[1]["body"]["id"], ids[1])
        # Check results
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0].id, "id1")
        self.assertEqual(results[0].payload, payloads[0])
        self.assertEqual(results[1].id, "id2")
        self.assertEqual(results[1].payload, payloads[1])
    def test_get(self):
        """get looks the id up via search and returns None when not found."""
        mock_response = {"hits": {"hits": [{"_id": "doc1", "_source": {"id": "id1", "payload": {"key1": "value1"}}}]}}
        self.client_mock.search.return_value = mock_response
        result = self.os_db.get("id1")
        self.client_mock.search.assert_called_once()
        search_args = self.client_mock.search.call_args[1]
        self.assertEqual(search_args["index"], "test_collection")
        self.assertIsNotNone(result)
        self.assertEqual(result.id, "id1")
        self.assertEqual(result.payload, {"key1": "value1"})
        # Test when no results are found
        self.client_mock.search.return_value = {"hits": {"hits": []}}
        result = self.os_db.get("nonexistent")
        self.assertIsNone(result)
    def test_update(self):
        """update resolves the document id via search, then issues a doc update."""
        vector = [0.3] * 1536
        payload = {"key3": "value3"}
        mock_search_response = {"hits": {"hits": [{"_id": "doc1", "_source": {"id": "id1"}}]}}
        self.client_mock.search.return_value = mock_search_response
        self.os_db.update("id1", vector=vector, payload=payload)
        self.client_mock.update.assert_called_once()
        update_args = self.client_mock.update.call_args[1]
        self.assertEqual(update_args["index"], "test_collection")
        self.assertEqual(update_args["id"], "doc1")
        self.assertEqual(update_args["body"], {"doc": {"vector_field": vector, "payload": payload}})
    def test_list_cols(self):
        """list_cols returns the alias names reported by the cluster."""
        self.client_mock.indices.get_alias.return_value = {"test_collection": {}}
        result = self.os_db.list_cols()
        self.client_mock.indices.get_alias.assert_called_once()
        self.assertEqual(result, ["test_collection"])
    def test_search(self):
        """search issues a knn query and maps hits into scored results."""
        mock_response = {
            "hits": {
                "hits": [
                    {
                        "_id": "id1",
                        "_score": 0.8,
                        "_source": {"vector_field": [0.1] * 1536, "id": "id1", "payload": {"key1": "value1"}},
                    }
                ]
            }
        }
        self.client_mock.search.return_value = mock_response
        vectors = [[0.1] * 1536]
        results = self.os_db.search(query="", vectors=vectors, limit=5)
        self.client_mock.search.assert_called_once()
        search_args = self.client_mock.search.call_args[1]
        self.assertEqual(search_args["index"], "test_collection")
        body = search_args["body"]
        self.assertIn("knn", body["query"])
        self.assertIn("vector_field", body["query"]["knn"])
        self.assertEqual(body["query"]["knn"]["vector_field"]["vector"], vectors)
        # NOTE(review): k is 10 here despite limit=5 — presumably an implementation
        # detail of the store's over-fetching; confirm against OpenSearchDB.search.
        self.assertEqual(body["query"]["knn"]["vector_field"]["k"], 10)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].id, "id1")
        self.assertEqual(results[0].score, 0.8)
        self.assertEqual(results[0].payload, {"key1": "value1"})
    def test_delete(self):
        """delete resolves the ES document id first, then deletes by it."""
        mock_search_response = {"hits": {"hits": [{"_id": "doc1", "_source": {"id": "id1"}}]}}
        self.client_mock.search.return_value = mock_search_response
        self.os_db.delete(vector_id="id1")
        self.client_mock.delete.assert_called_once_with(index="test_collection", id="doc1")
    def test_delete_col(self):
        """delete_col drops the whole index."""
        self.os_db.delete_col()
        self.client_mock.indices.delete.assert_called_once_with(index="test_collection")
    def test_init_with_http_auth(self):
        """An explicit AWS signer should be passed straight to the client."""
        mock_credentials = MagicMock()
        mock_signer = AWSV4SignerAuth(mock_credentials, "us-east-1", "es")
        with patch("mem0.vector_stores.opensearch.OpenSearch") as mock_opensearch:
            OpenSearchDB(
                host="localhost",
                port=9200,
                collection_name="test_collection",
                embedding_model_dims=1536,
                http_auth=mock_signer,
                verify_certs=True,
                use_ssl=True,
            )
            # Verify OpenSearch was initialized with correct params
            mock_opensearch.assert_called_once_with(
                hosts=[{"host": "localhost", "port": 9200}],
                http_auth=mock_signer,
                use_ssl=True,
                verify_certs=True,
                connection_class=unittest.mock.ANY,
                pool_maxsize=20,
            )
# Tests for OpenSearch config deepcopy with AWS authentication (Issue #3464)
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_safe_deepcopy_config_handles_opensearch_auth(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
    """_safe_deepcopy_config must tolerate OpenSearch configs with AWS auth objects."""
    for factory in (mock_embedder_factory, mock_vector_factory, mock_llm_factory, mock_sqlite):
        factory.return_value = MagicMock()
    from mem0.memory.main import _safe_deepcopy_config
    original = MockOpenSearchConfig(collection_name="opensearch_test", include_auth=True)
    copied = _safe_deepcopy_config(original)
    # Un-copyable auth members are dropped from the copy...
    for attr in ("http_auth", "auth", "credentials", "connection_class"):
        assert getattr(copied, attr) is None
    # ...while plain config values survive intact.
    assert copied.collection_name == "opensearch_test"
    assert copied.host == "localhost"
    assert copied.port == 9200
    assert copied.embedding_model_dims == 1536
    assert copied.use_ssl is True
    assert copied.verify_certs is True
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_safe_deepcopy_config_normal_configs(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
    """_safe_deepcopy_config must copy plain OpenSearch configs without loss."""
    for factory in (mock_embedder_factory, mock_vector_factory, mock_llm_factory, mock_sqlite):
        factory.return_value = MagicMock()
    from mem0.memory.main import _safe_deepcopy_config
    plain_config = MockOpenSearchConfig(collection_name="normal_test", include_auth=False)
    copied = _safe_deepcopy_config(plain_config)
    # Every plain value should come through the copy unchanged.
    assert copied.collection_name == "normal_test"
    assert copied.host == "localhost"
    assert copied.port == 9200
    assert copied.embedding_model_dims == 1536
    assert copied.use_ssl is True
    assert copied.verify_certs is True
@patch('mem0.utils.factory.EmbedderFactory.create')
@patch('mem0.utils.factory.VectorStoreFactory.create')
@patch('mem0.utils.factory.LlmFactory.create')
@patch('mem0.memory.storage.SQLiteManager')
def test_memory_initialization_opensearch_aws_auth(mock_sqlite, mock_llm_factory, mock_vector_factory, mock_embedder_factory):
    """Memory must initialize even when the OpenSearch config carries AWS auth."""
    for factory in (mock_embedder_factory, mock_vector_factory, mock_llm_factory, mock_sqlite):
        factory.return_value = MagicMock()
    config = MemoryConfig()
    config.vector_store.provider = "opensearch"
    config.vector_store.config = MockOpenSearchConfig(collection_name="mem0_test", include_auth=True)
    memory = Memory(config)
    assert memory is not None
    assert memory.config.vector_store.provider == "opensearch"
    # The factory runs at least twice: the main store plus the safe-copied one.
    assert mock_vector_factory.call_count >= 2
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_qdrant.py | tests/vector_stores/test_qdrant.py | import unittest
import uuid
from unittest.mock import MagicMock
from qdrant_client import QdrantClient
from qdrant_client.models import (
Distance,
Filter,
PointIdsList,
PointStruct,
VectorParams,
)
from mem0.vector_stores.qdrant import Qdrant
class TestQdrant(unittest.TestCase):
    def setUp(self):
        """Build a Qdrant wrapper around a fully mocked QdrantClient."""
        self.client_mock = MagicMock(spec=QdrantClient)
        self.qdrant = Qdrant(
            collection_name="test_collection",
            embedding_model_dims=128,
            client=self.client_mock,
            path="test_path",
            on_disk=True,
        )
def test_create_col(self):
self.client_mock.get_collections.return_value = MagicMock(collections=[])
self.qdrant.create_col(vector_size=128, on_disk=True)
expected_config = VectorParams(size=128, distance=Distance.COSINE, on_disk=True)
self.client_mock.create_collection.assert_called_with(
collection_name="test_collection", vectors_config=expected_config
)
def test_insert(self):
vectors = [[0.1, 0.2], [0.3, 0.4]]
payloads = [{"key": "value1"}, {"key": "value2"}]
ids = [str(uuid.uuid4()), str(uuid.uuid4())]
self.qdrant.insert(vectors=vectors, payloads=payloads, ids=ids)
self.client_mock.upsert.assert_called_once()
points = self.client_mock.upsert.call_args[1]["points"]
self.assertEqual(len(points), 2)
for point in points:
self.assertIsInstance(point, PointStruct)
self.assertEqual(points[0].payload, payloads[0])
def test_search(self):
vectors = [[0.1, 0.2]]
mock_point = MagicMock(id=str(uuid.uuid4()), score=0.95, payload={"key": "value"})
self.client_mock.query_points.return_value = MagicMock(points=[mock_point])
results = self.qdrant.search(query="", vectors=vectors, limit=1)
self.client_mock.query_points.assert_called_once_with(
collection_name="test_collection",
query=vectors,
query_filter=None,
limit=1,
)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].payload, {"key": "value"})
self.assertEqual(results[0].score, 0.95)
def test_search_with_filters(self):
"""Test search with agent_id and run_id filters."""
vectors = [[0.1, 0.2]]
mock_point = MagicMock(
id=str(uuid.uuid4()),
score=0.95,
payload={"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
)
self.client_mock.query_points.return_value = MagicMock(points=[mock_point])
filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
results = self.qdrant.search(query="", vectors=vectors, limit=1, filters=filters)
# Verify that _create_filter was called and query_filter was passed
self.client_mock.query_points.assert_called_once()
call_args = self.client_mock.query_points.call_args[1]
self.assertEqual(call_args["collection_name"], "test_collection")
self.assertEqual(call_args["query"], vectors)
self.assertEqual(call_args["limit"], 1)
# Verify that a Filter object was created
query_filter = call_args["query_filter"]
self.assertIsInstance(query_filter, Filter)
self.assertEqual(len(query_filter.must), 3) # user_id, agent_id, run_id
self.assertEqual(len(results), 1)
self.assertEqual(results[0].payload["user_id"], "alice")
self.assertEqual(results[0].payload["agent_id"], "agent1")
self.assertEqual(results[0].payload["run_id"], "run1")
def test_search_with_single_filter(self):
"""Test search with single filter."""
vectors = [[0.1, 0.2]]
mock_point = MagicMock(
id=str(uuid.uuid4()),
score=0.95,
payload={"user_id": "alice"}
)
self.client_mock.query_points.return_value = MagicMock(points=[mock_point])
filters = {"user_id": "alice"}
results = self.qdrant.search(query="", vectors=vectors, limit=1, filters=filters)
# Verify that a Filter object was created with single condition
call_args = self.client_mock.query_points.call_args[1]
query_filter = call_args["query_filter"]
self.assertIsInstance(query_filter, Filter)
self.assertEqual(len(query_filter.must), 1) # Only user_id
self.assertEqual(len(results), 1)
self.assertEqual(results[0].payload["user_id"], "alice")
def test_search_with_no_filters(self):
"""Test search with no filters."""
vectors = [[0.1, 0.2]]
mock_point = MagicMock(id=str(uuid.uuid4()), score=0.95, payload={"key": "value"})
self.client_mock.query_points.return_value = MagicMock(points=[mock_point])
results = self.qdrant.search(query="", vectors=vectors, limit=1, filters=None)
call_args = self.client_mock.query_points.call_args[1]
self.assertIsNone(call_args["query_filter"])
self.assertEqual(len(results), 1)
def test_create_filter_multiple_filters(self):
"""Test _create_filter with multiple filters."""
filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
result = self.qdrant._create_filter(filters)
self.assertIsInstance(result, Filter)
self.assertEqual(len(result.must), 3)
# Check that all conditions are present
conditions = [cond.key for cond in result.must]
self.assertIn("user_id", conditions)
self.assertIn("agent_id", conditions)
self.assertIn("run_id", conditions)
def test_create_filter_single_filter(self):
    """A single key/value pair becomes one match condition."""
    built = self.qdrant._create_filter({"user_id": "alice"})

    self.assertIsInstance(built, Filter)
    self.assertEqual(len(built.must), 1)
    only_condition = built.must[0]
    self.assertEqual(only_condition.key, "user_id")
    self.assertEqual(only_condition.match.value, "alice")
def test_create_filter_no_filters(self):
    """Both None and an empty dict should produce no filter at all."""
    for empty_input in (None, {}):
        self.assertIsNone(self.qdrant._create_filter(empty_input))
def test_create_filter_with_range_values(self):
    """Dict-valued filters become range conditions; scalars become match conditions."""
    built = self.qdrant._create_filter(
        {"user_id": "alice", "count": {"gte": 5, "lte": 10}}
    )

    self.assertIsInstance(built, Filter)
    self.assertEqual(len(built.must), 2)

    # Exactly one condition should carry a range (the "count" key).
    range_conds = [c for c in built.must if getattr(c, "range", None) is not None]
    self.assertEqual(len(range_conds), 1)
    self.assertEqual(range_conds[0].key, "count")

    # Exactly one condition should carry a match (the "user_id" key).
    match_conds = [c for c in built.must if getattr(c, "match", None) is not None]
    self.assertEqual(len(match_conds), 1)
    self.assertEqual(match_conds[0].key, "user_id")
def test_delete(self):
    """delete() must forward the id wrapped in a PointIdsList selector."""
    target_id = str(uuid.uuid4())

    self.qdrant.delete(vector_id=target_id)

    self.client_mock.delete.assert_called_once_with(
        collection_name="test_collection",
        points_selector=PointIdsList(points=[target_id]),
    )
def test_update(self):
    """update() should upsert a single point carrying the new vector and payload."""
    point_id = str(uuid.uuid4())
    new_vector = [0.2, 0.3]
    new_payload = {"key": "updated_value"}

    self.qdrant.update(vector_id=point_id, vector=new_vector, payload=new_payload)

    self.client_mock.upsert.assert_called_once()
    sent_point = self.client_mock.upsert.call_args[1]["points"][0]
    self.assertEqual(sent_point.id, point_id)
    self.assertEqual(sent_point.vector, new_vector)
    self.assertEqual(sent_point.payload, new_payload)
def test_get(self):
    """get() retrieves by id with payload enabled and unwraps the single result."""
    point_id = str(uuid.uuid4())
    self.client_mock.retrieve.return_value = [
        {"id": point_id, "payload": {"key": "value"}}
    ]

    fetched = self.qdrant.get(vector_id=point_id)

    self.client_mock.retrieve.assert_called_once_with(
        collection_name="test_collection", ids=[point_id], with_payload=True
    )
    self.assertEqual(fetched["id"], point_id)
    self.assertEqual(fetched["payload"], {"key": "value"})
def test_list_cols(self):
    """list_cols() should surface the collections reported by the client."""
    self.client_mock.get_collections.return_value = MagicMock(
        collections=[{"name": "test_collection"}]
    )
    listed = self.qdrant.list_cols()
    self.assertEqual(listed.collections[0]["name"], "test_collection")
def test_list_with_filters(self):
    """list() should translate user/agent/run filters into a scroll_filter."""
    point = MagicMock(
        id=str(uuid.uuid4()),
        score=0.95,
        payload={"user_id": "alice", "agent_id": "agent1", "run_id": "run1"},
    )
    self.client_mock.scroll.return_value = [point]

    rows = self.qdrant.list(
        filters={"user_id": "alice", "agent_id": "agent1", "run_id": "run1"},
        limit=10,
    )

    self.client_mock.scroll.assert_called_once()
    forwarded = self.client_mock.scroll.call_args[1]
    self.assertEqual(forwarded["collection_name"], "test_collection")
    self.assertEqual(forwarded["limit"], 10)

    built = forwarded["scroll_filter"]
    self.assertIsInstance(built, Filter)
    self.assertEqual(len(built.must), 3)  # user_id, agent_id, run_id

    # list() hands back the scroll result unchanged.
    self.assertEqual(len(rows), 1)
    self.assertEqual(rows[0].payload["user_id"], "alice")
    self.assertEqual(rows[0].payload["agent_id"], "agent1")
    self.assertEqual(rows[0].payload["run_id"], "run1")
def test_list_with_single_filter(self):
    """A single filter key should yield a one-condition scroll_filter."""
    point = MagicMock(
        id=str(uuid.uuid4()),
        score=0.95,
        payload={"user_id": "alice"},
    )
    self.client_mock.scroll.return_value = [point]

    rows = self.qdrant.list(filters={"user_id": "alice"}, limit=10)

    built = self.client_mock.scroll.call_args[1]["scroll_filter"]
    self.assertIsInstance(built, Filter)
    self.assertEqual(len(built.must), 1)  # only user_id
    # list() hands back the scroll result unchanged.
    self.assertEqual(len(rows), 1)
    self.assertEqual(rows[0].payload["user_id"], "alice")
def test_list_with_no_filters(self):
    """Without filters, scroll_filter must be None and results pass through."""
    point = MagicMock(id=str(uuid.uuid4()), score=0.95, payload={"key": "value"})
    self.client_mock.scroll.return_value = [point]

    rows = self.qdrant.list(filters=None, limit=10)

    self.assertIsNone(self.client_mock.scroll.call_args[1]["scroll_filter"])
    self.assertEqual(len(rows), 1)
def test_delete_col(self):
    """delete_col() drops exactly the configured collection."""
    self.qdrant.delete_col()
    self.client_mock.delete_collection.assert_called_once_with(
        collection_name="test_collection"
    )
def test_col_info(self):
    """col_info() asks the client for the configured collection's metadata."""
    self.qdrant.col_info()
    self.client_mock.get_collection.assert_called_once_with(
        collection_name="test_collection"
    )
def tearDown(self):
    # Drop the store instance so each test starts from a fresh fixture.
    del self.qdrant
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_milvus.py | tests/vector_stores/test_milvus.py | """
Unit tests for Milvus vector store implementation.
These tests verify:
1. Correct type handling for vector dimensions
2. Batch insert functionality
3. Filter creation for metadata queries
4. Update/upsert operations
"""
import pytest
from unittest.mock import MagicMock, patch
from mem0.vector_stores.milvus import MilvusDB
from mem0.configs.vector_stores.milvus import MetricType
class TestMilvusDB:
    """Test suite for MilvusDB vector store."""

    @pytest.fixture
    def mock_milvus_client(self):
        """Mock MilvusClient to avoid requiring actual Milvus instance."""
        with patch('mem0.vector_stores.milvus.MilvusClient') as mock_client:
            mock_instance = MagicMock()
            # Report "collection missing" so MilvusDB.__init__ takes the create path.
            mock_instance.has_collection.return_value = False
            mock_client.return_value = mock_instance
            yield mock_instance

    @pytest.fixture
    def milvus_db(self, mock_milvus_client):
        """Create MilvusDB instance with mocked client."""
        return MilvusDB(
            url="http://localhost:19530",
            token="test_token",
            collection_name="test_collection",
            embedding_model_dims=1536,  # Should be int, not str
            metric_type=MetricType.COSINE,
            db_name="test_db"
        )

    def test_initialization_with_int_dims(self, mock_milvus_client):
        """Test that vector dimensions are correctly handled as integers."""
        db = MilvusDB(
            url="http://localhost:19530",
            token="test_token",
            collection_name="test_collection",
            embedding_model_dims=1536,  # Integer
            metric_type=MetricType.COSINE,
            db_name="test_db"
        )
        assert db.embedding_model_dims == 1536
        assert isinstance(db.embedding_model_dims, int)

    def test_create_col_with_int_vector_size(self, milvus_db, mock_milvus_client):
        """Test collection creation with integer vector size (bug fix validation)."""
        # Collection was already created in __init__, but let's verify the call
        mock_milvus_client.create_collection.assert_called_once()
        call_args = mock_milvus_client.create_collection.call_args
        # Verify schema was created properly
        assert call_args is not None

    def test_batch_insert(self, milvus_db, mock_milvus_client):
        """Test that insert uses batch operation instead of loop (performance fix)."""
        ids = ["id1", "id2", "id3"]
        vectors = [[0.1] * 1536, [0.2] * 1536, [0.3] * 1536]
        payloads = [{"user_id": "alice"}, {"user_id": "bob"}, {"user_id": "charlie"}]
        milvus_db.insert(ids, vectors, payloads)
        # Verify insert was called once with all data (batch), not 3 times
        assert mock_milvus_client.insert.call_count == 1
        # Verify the data structure
        call_args = mock_milvus_client.insert.call_args
        inserted_data = call_args[1]['data']
        assert len(inserted_data) == 3
        assert inserted_data[0]['id'] == 'id1'
        assert inserted_data[1]['id'] == 'id2'
        assert inserted_data[2]['id'] == 'id3'

    def test_create_filter_string_value(self, milvus_db):
        """Test filter creation for string metadata values."""
        filters = {"user_id": "alice"}
        filter_str = milvus_db._create_filter(filters)
        # String values are rendered quoted inside the Milvus filter expression.
        assert filter_str == '(metadata["user_id"] == "alice")'

    def test_create_filter_numeric_value(self, milvus_db):
        """Test filter creation for numeric metadata values."""
        filters = {"age": 25}
        filter_str = milvus_db._create_filter(filters)
        # Numeric values are rendered unquoted.
        assert filter_str == '(metadata["age"] == 25)'

    def test_create_filter_multiple_conditions(self, milvus_db):
        """Test filter creation with multiple conditions."""
        filters = {"user_id": "alice", "category": "work"}
        filter_str = milvus_db._create_filter(filters)
        # Should join with 'and'
        assert 'metadata["user_id"] == "alice"' in filter_str
        assert 'metadata["category"] == "work"' in filter_str
        assert ' and ' in filter_str

    def test_search_with_filters(self, milvus_db, mock_milvus_client):
        """Test search with metadata filters (reproduces user's bug scenario)."""
        # Setup mock return value
        mock_milvus_client.search.return_value = [[
            {"id": "mem1", "distance": 0.8, "entity": {"metadata": {"user_id": "alice"}}}
        ]]
        query_vector = [0.1] * 1536
        filters = {"user_id": "alice"}
        results = milvus_db.search(
            query="test query",
            vectors=query_vector,
            limit=5,
            filters=filters
        )
        # Verify search was called with correct filter
        call_args = mock_milvus_client.search.call_args
        assert call_args[1]['filter'] == '(metadata["user_id"] == "alice")'
        # Verify results are parsed correctly
        assert len(results) == 1
        assert results[0].id == "mem1"
        assert results[0].score == 0.8

    def test_search_different_user_ids(self, milvus_db, mock_milvus_client):
        """Test that search works with different user_ids (reproduces reported bug)."""
        # This test validates the fix for: "Error with different user_ids"
        # Mock return for first user
        mock_milvus_client.search.return_value = [[
            {"id": "mem1", "distance": 0.9, "entity": {"metadata": {"user_id": "milvus_user"}}}
        ]]
        results1 = milvus_db.search("test", [0.1] * 1536, filters={"user_id": "milvus_user"})
        assert len(results1) == 1
        # Mock return for second user
        mock_milvus_client.search.return_value = [[
            {"id": "mem2", "distance": 0.85, "entity": {"metadata": {"user_id": "bob"}}}
        ]]
        # This should not raise "Unsupported Field type: 0" error
        results2 = milvus_db.search("test", [0.2] * 1536, filters={"user_id": "bob"})
        assert len(results2) == 1

    def test_update_uses_upsert(self, milvus_db, mock_milvus_client):
        """Test that update correctly uses upsert operation."""
        vector_id = "test_id"
        vector = [0.1] * 1536
        payload = {"user_id": "alice", "data": "Updated memory"}
        milvus_db.update(vector_id=vector_id, vector=vector, payload=payload)
        # Verify upsert was called (not delete+insert)
        mock_milvus_client.upsert.assert_called_once()
        call_args = mock_milvus_client.upsert.call_args
        assert call_args[1]['collection_name'] == "test_collection"
        assert call_args[1]['data']['id'] == vector_id
        assert call_args[1]['data']['vectors'] == vector
        assert call_args[1]['data']['metadata'] == payload

    def test_delete(self, milvus_db, mock_milvus_client):
        """Test vector deletion."""
        vector_id = "test_id"
        milvus_db.delete(vector_id)
        mock_milvus_client.delete.assert_called_once_with(
            collection_name="test_collection",
            ids=vector_id
        )

    def test_get(self, milvus_db, mock_milvus_client):
        """Test retrieving a vector by ID."""
        vector_id = "test_id"
        mock_milvus_client.get.return_value = [
            {"id": vector_id, "metadata": {"user_id": "alice"}}
        ]
        result = milvus_db.get(vector_id)
        assert result.id == vector_id
        assert result.payload == {"user_id": "alice"}
        # A direct get carries no similarity score.
        assert result.score is None

    def test_list_with_filters(self, milvus_db, mock_milvus_client):
        """Test listing memories with filters."""
        mock_milvus_client.query.return_value = [
            {"id": "mem1", "metadata": {"user_id": "alice"}},
            {"id": "mem2", "metadata": {"user_id": "alice"}}
        ]
        results = milvus_db.list(filters={"user_id": "alice"}, limit=10)
        # Verify query was called with filter
        call_args = mock_milvus_client.query.call_args
        assert call_args[1]['filter'] == '(metadata["user_id"] == "alice")'
        assert call_args[1]['limit'] == 10
        # Verify results
        # NOTE(review): list() appears to return a nested list — results[0] holds the rows.
        assert len(results[0]) == 2

    def test_parse_output(self, milvus_db):
        """Test output data parsing."""
        raw_data = [
            {
                "id": "mem1",
                "distance": 0.9,
                "entity": {"metadata": {"user_id": "alice"}}
            },
            {
                "id": "mem2",
                "distance": 0.85,
                "entity": {"metadata": {"user_id": "bob"}}
            }
        ]
        parsed = milvus_db._parse_output(raw_data)
        assert len(parsed) == 2
        assert parsed[0].id == "mem1"
        assert parsed[0].score == 0.9
        assert parsed[0].payload == {"user_id": "alice"}
        assert parsed[1].id == "mem2"
        assert parsed[1].score == 0.85

    def test_collection_already_exists(self, mock_milvus_client):
        """Test that existing collection is not recreated."""
        mock_milvus_client.has_collection.return_value = True
        MilvusDB(
            url="http://localhost:19530",
            token="test_token",
            collection_name="existing_collection",
            embedding_model_dims=1536,
            metric_type=MetricType.L2,
            db_name="test_db"
        )
        # create_collection should not be called
        mock_milvus_client.create_collection.assert_not_called()
# Allow running this test module directly: `python test_milvus.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_cassandra.py | tests/vector_stores/test_cassandra.py | import json
import pytest
from unittest.mock import Mock, patch
from mem0.vector_stores.cassandra import CassandraDB, OutputData
@pytest.fixture
def mock_session():
    """Provide a stand-in Cassandra session with stubbed query entry points."""
    fake_session = Mock()
    fake_session.execute = Mock(return_value=Mock())
    fake_session.prepare = Mock(return_value=Mock())
    fake_session.set_keyspace = Mock()
    return fake_session
@pytest.fixture
def mock_cluster(mock_session):
    """Provide a stand-in Cluster whose connect() hands back the mock session."""
    fake_cluster = Mock()
    fake_cluster.connect = Mock(return_value=mock_session)
    fake_cluster.shutdown = Mock()
    return fake_cluster
@pytest.fixture
def cassandra_instance(mock_cluster, mock_session):
    """Build a CassandraDB wired to the mocked cluster/session pair."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
        )
        # Force the instance onto the mock session regardless of how it connected.
        db.session = mock_session
        return db
def test_cassandra_init(mock_cluster, mock_session):
    """Constructor should record every connection parameter it was given."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
        )
        # Compare all recorded settings in one shot.
        observed = (
            db.contact_points,
            db.port,
            db.username,
            db.keyspace,
            db.collection_name,
            db.embedding_model_dims,
        )
        assert observed == (
            ['127.0.0.1'],
            9042,
            'testuser',
            'test_keyspace',
            'test_collection',
            128,
        )
def test_create_col(cassandra_instance):
    """create_col should issue table DDL through the session."""
    cassandra_instance.create_col(name="new_collection", vector_size=256)
    # Table creation must have gone through session.execute.
    assert cassandra_instance.session.execute.called
def test_insert(cassandra_instance):
    """insert should prepare a statement and execute it for the given rows."""
    session = cassandra_instance.session
    session.prepare = Mock(return_value=Mock())

    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
        ids=["id1", "id2"],
    )

    assert session.prepare.called
    assert session.execute.called
def test_search(cassandra_instance):
    """search should run a query and return at most `limit` results."""
    def fake_row(row_id, vector, payload):
        # Rows come back from the driver with id/vector/payload attributes.
        row = Mock()
        row.id = row_id
        row.vector = vector
        row.payload = json.dumps(payload)
        return row

    cassandra_instance.session.execute = Mock(
        return_value=[
            fake_row('id1', [0.1, 0.2, 0.3], {"text": "test1"}),
            fake_row('id2', [0.4, 0.5, 0.6], {"text": "test2"}),
        ]
    )

    found = cassandra_instance.search(query="test", vectors=[0.2, 0.3, 0.4], limit=5)

    assert isinstance(found, list)
    assert len(found) <= 5
    assert cassandra_instance.session.execute.called
def test_delete(cassandra_instance):
    """delete should go through a prepared statement and execute it."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())

    cassandra_instance.delete(vector_id="test_id")

    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_update(cassandra_instance):
    """update should prepare and execute a statement for the new vector/payload."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())

    cassandra_instance.update(
        vector_id="test_id",
        vector=[0.7, 0.8, 0.9],
        payload={"text": "updated"},
    )

    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_get(cassandra_instance):
    """get should unwrap the single matching row into an OutputData."""
    row = Mock()
    row.id = 'test_id'
    row.vector = [0.1, 0.2, 0.3]
    row.payload = json.dumps({"text": "test"})

    # The driver returns a result set whose .one() yields the row.
    result_set = Mock()
    result_set.one = Mock(return_value=row)

    cassandra_instance.session.prepare = Mock(return_value=Mock())
    cassandra_instance.session.execute = Mock(return_value=result_set)

    fetched = cassandra_instance.get(vector_id="test_id")

    assert fetched is not None
    assert isinstance(fetched, OutputData)
    assert fetched.id == "test_id"
def test_list_cols(cassandra_instance):
    """list_cols should return the table names reported by the driver."""
    rows = []
    for table in ("collection1", "collection2"):
        row = Mock()
        row.table_name = table
        rows.append(row)
    cassandra_instance.session.execute = Mock(return_value=rows)

    names = cassandra_instance.list_cols()

    assert isinstance(names, list)
    assert len(names) == 2
    assert "collection1" in names
def test_delete_col(cassandra_instance):
    """delete_col should drop the table via the session."""
    cassandra_instance.delete_col()
    assert cassandra_instance.session.execute.called
def test_col_info(cassandra_instance):
    """col_info should report at least the table name and keyspace."""
    count_row = Mock()
    count_row.count = 100
    result_set = Mock()
    result_set.one = Mock(return_value=count_row)
    cassandra_instance.session.execute = Mock(return_value=result_set)

    info = cassandra_instance.col_info()

    assert isinstance(info, dict)
    assert 'name' in info
    assert 'keyspace' in info
def test_list(cassandra_instance):
    """list should surface the stored vectors."""
    row = Mock()
    row.id = 'id1'
    row.vector = [0.1, 0.2, 0.3]
    row.payload = json.dumps({"text": "test1"})
    cassandra_instance.session.execute = Mock(return_value=[row])

    listed = cassandra_instance.list(limit=10)

    assert isinstance(listed, list)
    assert len(listed) > 0
def test_reset(cassandra_instance):
    """reset should rebuild the collection through the session."""
    cassandra_instance.reset()
    assert cassandra_instance.session.execute.called
def test_astra_db_connection(mock_cluster, mock_session):
    """A secure-connect bundle path should be stored on the instance."""
    with patch('mem0.vector_stores.cassandra.Cluster') as cluster_cls:
        cluster_cls.return_value = mock_cluster
        db = CassandraDB(
            contact_points=['127.0.0.1'],
            port=9042,
            username='testuser',
            password='testpass',
            keyspace='test_keyspace',
            collection_name='test_collection',
            embedding_model_dims=128,
            secure_connect_bundle='/path/to/bundle.zip'
        )
        assert db.secure_connect_bundle == '/path/to/bundle.zip'
def test_search_with_filters(cassandra_instance):
    """Filtered search should only surface rows whose payload matches."""
    def fake_row(row_id, vector, payload):
        row = Mock()
        row.id = row_id
        row.vector = vector
        row.payload = json.dumps(payload)
        return row

    cassandra_instance.session.execute = Mock(
        return_value=[
            fake_row('id1', [0.1, 0.2, 0.3], {"text": "test1", "category": "A"}),
            fake_row('id2', [0.4, 0.5, 0.6], {"text": "test2", "category": "B"}),
        ]
    )

    found = cassandra_instance.search(
        query="test",
        vectors=[0.2, 0.3, 0.4],
        limit=5,
        filters={"category": "A"},
    )

    assert isinstance(found, list)
    # Only category-A rows may survive the filter.
    for item in found:
        assert item.payload.get("category") == "A"
def test_output_data_model():
    """OutputData should carry id, score and payload exactly as given."""
    record = OutputData(id="test_id", score=0.95, payload={"text": "test"})

    assert record.id == "test_id"
    assert record.score == 0.95
    assert record.payload == {"text": "test"}
def test_insert_without_ids(cassandra_instance):
    """insert must accept rows without caller-supplied IDs."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())

    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"text": "test1"}, {"text": "test2"}],
    )

    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
def test_insert_without_payloads(cassandra_instance):
    """insert must accept rows that carry no payload."""
    cassandra_instance.session.prepare = Mock(return_value=Mock())

    cassandra_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        ids=["id1", "id2"],
    )

    assert cassandra_instance.session.prepare.called
    assert cassandra_instance.session.execute.called
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_databricks.py | tests/vector_stores/test_databricks.py | from types import SimpleNamespace
from unittest.mock import MagicMock, patch
from databricks.sdk.service.vectorsearch import VectorIndexType, QueryVectorIndexResponse, ResultManifest, ResultData, ColumnInfo
from mem0.vector_stores.databricks import Databricks
import pytest
# ---------------------- Fixtures ---------------------- #
def _make_status(state="SUCCEEDED", error=None):
return SimpleNamespace(state=SimpleNamespace(value=state), error=error)
def _make_exec_response(state="SUCCEEDED", error=None):
return SimpleNamespace(status=_make_status(state, error))
@pytest.fixture
def mock_workspace_client():
    """Patch WorkspaceClient and provide a fully mocked client with required sub-clients."""
    with patch("mem0.vector_stores.databricks.WorkspaceClient") as mock_wc_cls:
        mock_wc = MagicMock(name="WorkspaceClient")
        # warehouses.list -> iterable of objects with name/id
        warehouse_obj = SimpleNamespace(name="test-warehouse", id="wh-123")
        mock_wc.warehouses.list.return_value = [warehouse_obj]
        # vector search endpoints
        # First get_endpoint call raises ("endpoint missing"), the second succeeds —
        # this drives the create-then-verify path during store initialization.
        mock_wc.vector_search_endpoints.get_endpoint.side_effect = [Exception("not found"), MagicMock()]
        mock_wc.vector_search_endpoints.create_endpoint_and_wait.return_value = None
        # tables.exists — report the backing table as absent so it gets created.
        exists_obj = SimpleNamespace(table_exists=False)
        mock_wc.tables.exists.return_value = exists_obj
        mock_wc.tables.create.return_value = None
        mock_wc.table_constraints.create.return_value = None
        # vector_search_indexes list/create/query/delete
        mock_wc.vector_search_indexes.list_indexes.return_value = []
        mock_wc.vector_search_indexes.create_index.return_value = SimpleNamespace(name="catalog.schema.mem0")
        mock_wc.vector_search_indexes.query_index.return_value = SimpleNamespace(result=SimpleNamespace(data_array=[]))
        mock_wc.vector_search_indexes.delete_index.return_value = None
        mock_wc.vector_search_indexes.get_index.return_value = SimpleNamespace(name="mem0")
        # statement execution — SQL statements report SUCCEEDED by default.
        mock_wc.statement_execution.execute_statement.return_value = _make_exec_response()
        mock_wc_cls.return_value = mock_wc
        yield mock_wc
@pytest.fixture
def db_instance_delta(mock_workspace_client):
    # Store configured with a DELTA_SYNC index: text is embedded server-side by
    # the named embedding model endpoint, so searches take query text.
    return Databricks(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DELTA_SYNC,
        embedding_model_endpoint_name="embedding-endpoint",
    )
@pytest.fixture
def db_instance_direct(mock_workspace_client):
    # For DIRECT_ACCESS we want table exists path to skip creation; adjust mock first
    # (must happen before constructing Databricks, which checks tables.exists).
    mock_workspace_client.tables.exists.return_value = SimpleNamespace(table_exists=True)
    return Databricks(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DIRECT_ACCESS,
        embedding_dimension=4,
        embedding_model_endpoint_name="embedding-endpoint",
    )
# ---------------------- Initialization Tests ---------------------- #
def test_initialization_delta_sync(db_instance_delta, mock_workspace_client):
    """DELTA_SYNC setup should create endpoint, table, and a memory_id-keyed index."""
    # Endpoint lookup failed first (fixture side_effect), so it must be created.
    mock_workspace_client.vector_search_endpoints.create_endpoint_and_wait.assert_called_once()
    # The backing table is created exactly once.
    mock_workspace_client.tables.create.assert_called_once()
    # The index must be DELTA_SYNC with memory_id as primary key.
    index_kwargs = mock_workspace_client.vector_search_indexes.create_index.call_args.kwargs
    assert index_kwargs["index_type"] == VectorIndexType.DELTA_SYNC
    assert index_kwargs["primary_key"] == "memory_id"
def test_initialization_direct_access(db_instance_direct, mock_workspace_client):
    """DIRECT_ACCESS setup should add an embedding column and matching index type."""
    assert "embedding" in db_instance_direct.column_names
    index_kwargs = mock_workspace_client.vector_search_indexes.create_index.call_args.kwargs
    assert index_kwargs["index_type"] == VectorIndexType.DIRECT_ACCESS
def test_create_col_invalid_type(mock_workspace_client):
    """create_col must reject an unrecognized index type."""
    store = Databricks(
        workspace_url="https://test",
        access_token="tok",
        endpoint_name="vs-endpoint",
        catalog="catalog",
        schema="schema",
        table_name="table",
        collection_name="mem0",
        warehouse_name="test-warehouse",
        index_type=VectorIndexType.DELTA_SYNC,
    )
    # Corrupt the index type after construction to reach the validation branch.
    store.index_type = "BAD_TYPE"
    with pytest.raises(ValueError):
        store.create_col()
# ---------------------- Insert Tests ---------------------- #
def test_insert_generates_sql(db_instance_direct, mock_workspace_client):
    """insert should render an INSERT statement with the id and array() embedding."""
    db_instance_direct.insert(
        vectors=[[0.1, 0.2, 0.3, 0.4]],
        payloads=[
            {
                "data": "hello world",
                "user_id": "u1",
                "agent_id": "a1",
                "run_id": "r1",
                "metadata": '{"topic":"greeting"}',
                "hash": "h1",
            }
        ],
        ids=["id1"],
    )

    args, kwargs = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = kwargs["statement"] if "statement" in kwargs else args[0]
    assert "INSERT INTO" in sql
    assert "catalog.schema.table" in sql
    assert "id1" in sql
    # The embedding must be rendered as a SQL array literal.
    assert "array(0.1, 0.2, 0.3, 0.4)" in sql
# ---------------------- Search Tests ---------------------- #
def test_search_delta_sync_text(db_instance_delta, mock_workspace_client):
    """DELTA_SYNC search uses query text and parses result rows into hits."""
    row = [
        "id1",
        "hash1",
        "agent1",
        "run1",
        "user1",
        "memory text",
        '{"topic":"greeting"}',
        "2024-01-01T00:00:00",
        "2024-01-01T00:00:00",
        0.42,
    ]
    mock_workspace_client.vector_search_indexes.query_index.return_value = SimpleNamespace(
        result=SimpleNamespace(data_array=[row])
    )

    hits = db_instance_delta.search(query="hello", vectors=None, limit=1)

    mock_workspace_client.vector_search_indexes.query_index.assert_called_once()
    assert len(hits) == 1
    top = hits[0]
    assert top.id == "id1"
    assert top.score == 0.42
    assert top.payload["data"] == "memory text"
def test_search_direct_access_vector(db_instance_direct, mock_workspace_client):
    """DIRECT_ACCESS search takes a query vector and parses scored rows."""
    row = [
        "id2",
        "hash2",
        "agent2",
        "run2",
        "user2",
        "memory two",
        '{"topic":"info"}',
        "2024-01-02T00:00:00",
        "2024-01-02T00:00:00",
        [0.1, 0.2, 0.3, 0.4],
        0.77,
    ]
    mock_workspace_client.vector_search_indexes.query_index.return_value = SimpleNamespace(
        result=SimpleNamespace(data_array=[row])
    )

    hits = db_instance_direct.search(query="", vectors=[0.1, 0.2, 0.3, 0.4], limit=1)

    assert len(hits) == 1
    assert hits[0].id == "id2"
    assert hits[0].score == 0.77
def test_search_missing_params_raises(db_instance_delta):
    """DELTA_SYNC search requires query text; an empty query must raise."""
    with pytest.raises(ValueError):
        db_instance_delta.search(query="", vectors=[0.1, 0.2])
# ---------------------- Delete Tests ---------------------- #
def test_delete_vector(db_instance_delta, mock_workspace_client):
    """delete should emit a DELETE statement targeting the given id."""
    db_instance_delta.delete("id-delete")

    args, kwargs = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = kwargs.get("statement") or args[0]
    assert "DELETE FROM" in sql and "id-delete" in sql
# ---------------------- Update Tests ---------------------- #
def test_update_vector(db_instance_direct, mock_workspace_client):
    """update should render UPDATE SQL with vector and payload, excluding user_id."""
    db_instance_direct.update(
        vector_id="id-upd",
        vector=[0.4, 0.5, 0.6, 0.7],
        payload={"custom": "val", "user_id": "skip"},  # user_id should be excluded
    )

    args, kwargs = mock_workspace_client.statement_execution.execute_statement.call_args
    sql = kwargs.get("statement") or args[0]
    assert "UPDATE" in sql and "id-upd" in sql
    assert "embedding = [0.4, 0.5, 0.6, 0.7]" in sql
    assert "custom = 'val'" in sql
    # Identity columns are never rewritten by update().
    assert "user_id" not in sql  # excluded
# ---------------------- Get Tests ---------------------- #
def test_get_vector(db_instance_delta, mock_workspace_client):
    # get() should map the single returned row into an object whose payload
    # exposes the memory text under "data" plus the flattened metadata keys.
    mock_workspace_client.vector_search_indexes.query_index.return_value = QueryVectorIndexResponse(
        manifest=ResultManifest(columns=[
            ColumnInfo(name="memory_id"),
            ColumnInfo(name="hash"),
            ColumnInfo(name="agent_id"),
            ColumnInfo(name="run_id"),
            ColumnInfo(name="user_id"),
            ColumnInfo(name="memory"),
            ColumnInfo(name="metadata"),
            ColumnInfo(name="created_at"),
            ColumnInfo(name="updated_at"),
            ColumnInfo(name="score"),
        ]),
        result=ResultData(
            data_array=[
                [
                    "id-get",
                    "h",
                    "a",
                    "r",
                    "u",
                    "some memory",
                    '{"tag":"x"}',
                    "2024-01-01T00:00:00",
                    "2024-01-01T00:00:00",
                    "0.99",
                ]
            ]
        )
    )
    res = db_instance_delta.get("id-get")
    assert res.id == "id-get"
    assert res.payload["data"] == "some memory"
    # The JSON metadata string is parsed and merged into the payload.
    assert res.payload["tag"] == "x"
# ---------------------- Collection Info / Listing Tests ---------------------- #
def test_list_cols(db_instance_delta, mock_workspace_client):
    """list_cols should return every index name reported by the endpoint."""
    mock_workspace_client.vector_search_indexes.list_indexes.return_value = [
        SimpleNamespace(name="catalog.schema.mem0"),
        SimpleNamespace(name="catalog.schema.other"),
    ]

    names = db_instance_delta.list_cols()

    assert "catalog.schema.mem0" in names
    assert "catalog.schema.other" in names
def test_col_info(db_instance_delta):
    """col_info should expose the collection name and its schema fields."""
    info = db_instance_delta.col_info()
    assert info["name"] == "mem0"
    field_names = [col.name for col in info["fields"]]
    assert "memory_id" in field_names
def test_list_memories(db_instance_delta, mock_workspace_client):
    # list() should return a nested list: the outer list wraps one page of
    # parsed memory rows built from the query_index response.
    mock_workspace_client.vector_search_indexes.query_index.return_value = QueryVectorIndexResponse(
        manifest=ResultManifest(columns=[
            ColumnInfo(name="memory_id"),
            ColumnInfo(name="hash"),
            ColumnInfo(name="agent_id"),
            ColumnInfo(name="run_id"),
            ColumnInfo(name="user_id"),
            ColumnInfo(name="memory"),
            ColumnInfo(name="metadata"),
            ColumnInfo(name="created_at"),
            ColumnInfo(name="updated_at"),
            ColumnInfo(name="score"),
        ]),
        result=ResultData(
            data_array=[
                [
                    "id-get",
                    "h",
                    "a",
                    "r",
                    "u",
                    "some memory",
                    '{"tag":"x"}',
                    "2024-01-01T00:00:00",
                    "2024-01-01T00:00:00",
                    "0.99",
                ]
            ]
        )
    )
    res = db_instance_delta.list(limit=1)
    assert isinstance(res, list)
    assert len(res[0]) == 1
    assert res[0][0].id == "id-get"
# ---------------------- Reset Tests ---------------------- #
def test_reset(db_instance_delta, mock_workspace_client):
    """reset() should tolerate delete_index failures and still recreate the index."""
    # Make delete raise to exercise fallback path then allow recreation
    mock_workspace_client.vector_search_indexes.delete_index.side_effect = [Exception("fail fq"), None, None]
    with patch.object(db_instance_delta, "create_col", wraps=db_instance_delta.create_col) as create_spy:
        db_instance_delta.reset()
    # wraps= keeps the real create_col running while letting us observe the call.
    assert create_spy.called
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_neptune_analytics.py | tests/vector_stores/test_neptune_analytics.py | import logging
import os
import sys
import pytest
from dotenv import load_dotenv
from mem0.utils.factory import VectorStoreFactory
load_dotenv()
# Configure logging: surface the vector-store module logs at INFO and this
# test module's own logger at DEBUG, all routed to stdout for pytest capture.
logging.getLogger("mem0.vector.neptune.main").setLevel(logging.INFO)
logging.getLogger("mem0.vector.neptune.base").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(
    format="%(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    stream=sys.stdout,
)
# Test constants
EMBEDDING_MODEL_DIMS = 1024  # dimension the Neptune Analytics index under test expects
# Three distinct constant vectors so inserted items are distinguishable by id.
VECTOR_1 = [-0.1] * EMBEDDING_MODEL_DIMS
VECTOR_2 = [-0.2] * EMBEDDING_MODEL_DIMS
VECTOR_3 = [-0.3] * EMBEDDING_MODEL_DIMS
SAMPLE_PAYLOADS = [
    {"test_text": "text_value", "another_field": "field_2_value"},
    {"test_text": "text_value_BBBB"},
    {"test_text": "text_value_CCCC"}
]
@pytest.mark.skipif(not os.getenv("RUN_TEST_NEPTUNE_ANALYTICS"), reason="Only run with RUN_TEST_NEPTUNE_ANALYTICS is true")
class TestNeptuneAnalyticsOperations:
    """Test basic CRUD operations.

    NOTE: these are live integration tests — they need a reachable Neptune
    Analytics graph (GRAPH_ID env var) and only run when
    RUN_TEST_NEPTUNE_ANALYTICS is set.
    """

    @pytest.fixture
    def na_instance(self):
        """Create Neptune Analytics vector store instance for testing."""
        config = {
            "endpoint": f"neptune-graph://{os.getenv('GRAPH_ID')}",
            "collection_name": "test",
        }
        return VectorStoreFactory.create("neptune", config)

    def test_insert_and_list(self, na_instance):
        """Test vector insertion and listing."""
        na_instance.reset()  # start each test from an empty collection
        na_instance.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3],
            ids=["A", "B", "C"],
            payloads=SAMPLE_PAYLOADS
        )
        # list() returns pages; [0] is the first (and only) page.
        list_result = na_instance.list()[0]
        assert len(list_result) == 3
        # Internal "label" bookkeeping must not leak into user payloads.
        assert "label" not in list_result[0].payload

    def test_get(self, na_instance):
        """Test retrieving a specific vector."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1],
            ids=["A"],
            payloads=[SAMPLE_PAYLOADS[0]]
        )
        vector_a = na_instance.get("A")
        assert vector_a.id == "A"
        # get() is a direct lookup, so no similarity score is attached.
        assert vector_a.score is None
        assert vector_a.payload["test_text"] == "text_value"
        assert vector_a.payload["another_field"] == "field_2_value"
        assert "label" not in vector_a.payload

    def test_update(self, na_instance):
        """Test updating vector payload."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1],
            ids=["A"],
            payloads=[SAMPLE_PAYLOADS[0]]
        )
        na_instance.update(vector_id="A", payload={"updated_payload_str": "update_str"})
        vector_a = na_instance.get("A")
        assert vector_a.id == "A"
        assert vector_a.score is None
        assert vector_a.payload["updated_payload_str"] == "update_str"
        assert "label" not in vector_a.payload

    def test_delete(self, na_instance):
        """Test deleting a specific vector."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1],
            ids=["A"],
            payloads=[SAMPLE_PAYLOADS[0]]
        )
        size_before = na_instance.list()[0]
        assert len(size_before) == 1
        na_instance.delete("A")
        size_after = na_instance.list()[0]
        assert len(size_after) == 0

    def test_search(self, na_instance):
        """Test vector similarity search."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3],
            ids=["A", "B", "C"],
            payloads=SAMPLE_PAYLOADS
        )
        # Searching with VECTOR_1 itself; limit=1 should return the best match.
        result = na_instance.search(query="", vectors=VECTOR_1, limit=1)
        assert len(result) == 1
        assert "label" not in result[0].payload

    def test_reset(self, na_instance):
        """Test resetting the collection."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3],
            ids=["A", "B", "C"],
            payloads=SAMPLE_PAYLOADS
        )
        list_result = na_instance.list()[0]
        assert len(list_result) == 3
        na_instance.reset()
        list_result = na_instance.list()[0]
        assert len(list_result) == 0

    def test_delete_col(self, na_instance):
        """Test deleting the entire collection."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3],
            ids=["A", "B", "C"],
            payloads=SAMPLE_PAYLOADS
        )
        list_result = na_instance.list()[0]
        assert len(list_result) == 3
        na_instance.delete_col()
        list_result = na_instance.list()[0]
        assert len(list_result) == 0

    def test_list_cols(self, na_instance):
        """Test listing collections."""
        na_instance.reset()
        na_instance.insert(
            vectors=[VECTOR_1, VECTOR_2, VECTOR_3],
            ids=["A", "B", "C"],
            payloads=SAMPLE_PAYLOADS
        )
        # Collection names are prefixed with MEM0_VECTOR_ inside the graph.
        result = na_instance.list_cols()
        assert result == ["MEM0_VECTOR_test"]

    def test_invalid_endpoint_format(self):
        """Test that invalid endpoint format raises ValueError."""
        # Only the neptune-graph:// scheme is accepted for Neptune Analytics.
        config = {
            "endpoint": f"xxx://{os.getenv('GRAPH_ID')}",
            "collection_name": "test",
        }
        with pytest.raises(ValueError):
            VectorStoreFactory.create("neptune", config)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_faiss.py | tests/vector_stores/test_faiss.py | import os
import tempfile
from unittest.mock import Mock, patch
import faiss
import numpy as np
import pytest
from mem0.vector_stores.faiss import FAISS, OutputData
@pytest.fixture
def mock_faiss_index():
    """Provide a stand-in FAISS flat-L2 index: 128-dimensional and initially empty."""
    fake_index = Mock(spec=faiss.IndexFlatL2)
    fake_index.d = 128
    fake_index.ntotal = 0
    return fake_index
@pytest.fixture
def faiss_instance(mock_faiss_index):
    """Yield a FAISS store backed by the mocked index inside a throwaway directory."""
    with tempfile.TemporaryDirectory() as workdir, \
            patch("faiss.IndexFlatL2", return_value=mock_faiss_index), \
            patch("faiss.write_index"):
        store = FAISS(
            collection_name="test_collection",
            path=os.path.join(workdir, "test_faiss"),
            distance_strategy="euclidean",
        )
        # Attach the mock directly so tests can assert on index-level calls.
        store.index = mock_faiss_index
        yield store
def test_create_col(faiss_instance, mock_faiss_index):
    """create_col should pick the FAISS index class matching the distance metric."""
    # Test creating a collection with euclidean distance
    with patch("faiss.IndexFlatL2", return_value=mock_faiss_index) as mock_index_flat_l2:
        with patch("faiss.write_index"):
            faiss_instance.create_col(name="new_collection")
            mock_index_flat_l2.assert_called_once_with(faiss_instance.embedding_model_dims)
    # Test creating a collection with inner product distance
    with patch("faiss.IndexFlatIP", return_value=mock_faiss_index) as mock_index_flat_ip:
        with patch("faiss.write_index"):
            faiss_instance.create_col(name="new_collection", distance="inner_product")
            mock_index_flat_ip.assert_called_once_with(faiss_instance.embedding_model_dims)
def test_insert(faiss_instance, mock_faiss_index):
    """insert() should add vectors to the index and record payloads/id mappings."""
    # Prepare test data
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]
    ids = ["id1", "id2"]
    # Mock the numpy array conversion
    with patch("numpy.array", return_value=np.array(vectors, dtype=np.float32)) as mock_np_array:
        # Mock index.add
        mock_faiss_index.add.return_value = None
        # Call insert
        faiss_instance.insert(vectors=vectors, payloads=payloads, ids=ids)
        # Verify numpy.array was called
        mock_np_array.assert_called_once_with(vectors, dtype=np.float32)
        # Verify index.add was called
        mock_faiss_index.add.assert_called_once()
        # Verify docstore and index_to_id were updated: positional index slots
        # 0 and 1 must map back to the caller-supplied string ids.
        assert faiss_instance.docstore["id1"] == {"name": "vector1"}
        assert faiss_instance.docstore["id2"] == {"name": "vector2"}
        assert faiss_instance.index_to_id[0] == "id1"
        assert faiss_instance.index_to_id[1] == "id2"
def test_search(faiss_instance, mock_faiss_index):
    """search() should run a KNN query against the index and return parsed results."""
    # Prepare test data
    query_vector = [0.1, 0.2, 0.3]
    # Setup the docstore and index_to_id mapping
    faiss_instance.docstore = {"id1": {"name": "vector1"}, "id2": {"name": "vector2"}}
    faiss_instance.index_to_id = {0: "id1", 1: "id2"}
    # First, create the mock for the search return values
    search_scores = np.array([[0.9, 0.8]])
    search_indices = np.array([[0, 1]])
    mock_faiss_index.search.return_value = (search_scores, search_indices)
    # Then patch numpy.array only for the query vector conversion
    with patch("numpy.array") as mock_np_array:
        mock_np_array.return_value = np.array(query_vector, dtype=np.float32)
        # Then patch _parse_output to return the expected results
        expected_results = [
            OutputData(id="id1", score=0.9, payload={"name": "vector1"}),
            OutputData(id="id2", score=0.8, payload={"name": "vector2"}),
        ]
        with patch.object(faiss_instance, "_parse_output", return_value=expected_results):
            # Call search
            results = faiss_instance.search(query="test query", vectors=query_vector, limit=2)
            # Verify numpy.array was called (but we don't check exact call arguments since it's complex)
            assert mock_np_array.called
            # Verify index.search was called
            mock_faiss_index.search.assert_called_once()
            # Verify results come back in score order with payloads attached
            assert len(results) == 2
            assert results[0].id == "id1"
            assert results[0].score == 0.9
            assert results[0].payload == {"name": "vector1"}
            assert results[1].id == "id2"
            assert results[1].score == 0.8
            assert results[1].payload == {"name": "vector2"}
def test_search_with_filters(faiss_instance, mock_faiss_index):
    """search() with filters should drop results whose payload fails _apply_filters."""
    # Prepare test data
    query_vector = [0.1, 0.2, 0.3]
    # Setup the docstore and index_to_id mapping
    faiss_instance.docstore = {"id1": {"name": "vector1", "category": "A"}, "id2": {"name": "vector2", "category": "B"}}
    faiss_instance.index_to_id = {0: "id1", 1: "id2"}
    # First set up the search return values
    search_scores = np.array([[0.9, 0.8]])
    search_indices = np.array([[0, 1]])
    mock_faiss_index.search.return_value = (search_scores, search_indices)
    # Patch numpy.array for query vector conversion
    with patch("numpy.array") as mock_np_array:
        mock_np_array.return_value = np.array(query_vector, dtype=np.float32)
        # Directly mock the _parse_output method to return our expected values
        # We're simulating that _parse_output filters to just the first result
        all_results = [
            OutputData(id="id1", score=0.9, payload={"name": "vector1", "category": "A"}),
            OutputData(id="id2", score=0.8, payload={"name": "vector2", "category": "B"}),
        ]
        # Replace the _apply_filters method to handle our test case:
        # only payloads with category "A" survive the predicate.
        with patch.object(faiss_instance, "_parse_output", return_value=all_results):
            with patch.object(faiss_instance, "_apply_filters", side_effect=lambda p, f: p.get("category") == "A"):
                # Call search with filters
                results = faiss_instance.search(
                    query="test query", vectors=query_vector, limit=2, filters={"category": "A"}
                )
                # Verify numpy.array was called
                assert mock_np_array.called
                # Verify index.search was called
                mock_faiss_index.search.assert_called_once()
                # Verify filtered results - since we've mocked everything,
                # we should get just the result we want
                assert len(results) == 1
                assert results[0].id == "id1"
                assert results[0].score == 0.9
                assert results[0].payload == {"name": "vector1", "category": "A"}
def test_delete(faiss_instance):
    """Deleting one id should drop only that entry, leaving the other intact."""
    faiss_instance.docstore = {"id1": {"name": "vector1"}, "id2": {"name": "vector2"}}
    faiss_instance.index_to_id = {0: "id1", 1: "id2"}

    faiss_instance.delete(vector_id="id1")

    docstore, slot_map = faiss_instance.docstore, faiss_instance.index_to_id
    # The deleted entry is gone from both structures...
    assert "id1" not in docstore and 0 not in slot_map
    # ...while the sibling entry survives untouched.
    assert "id2" in docstore and 1 in slot_map
def test_update(faiss_instance, mock_faiss_index):
    """update() replaces the payload in place, but a vector update is delete+insert."""
    # Setup the docstore and index_to_id mapping
    faiss_instance.docstore = {"id1": {"name": "vector1"}, "id2": {"name": "vector2"}}
    faiss_instance.index_to_id = {0: "id1", 1: "id2"}
    # Test updating payload only
    faiss_instance.update(vector_id="id1", payload={"name": "updated_vector1"})
    assert faiss_instance.docstore["id1"] == {"name": "updated_vector1"}
    # Test updating vector
    # This requires mocking the delete and insert methods
    with patch.object(faiss_instance, "delete") as mock_delete:
        with patch.object(faiss_instance, "insert") as mock_insert:
            new_vector = [0.7, 0.8, 0.9]
            faiss_instance.update(vector_id="id2", vector=new_vector)
            # Verify delete and insert were called
            # Match the actual call signature (positional arg instead of keyword)
            mock_delete.assert_called_once_with("id2")
            mock_insert.assert_called_once()
def test_get(faiss_instance):
    """get() returns a score-less record for known ids and None for unknown ones."""
    faiss_instance.docstore = {"id1": {"name": "vector1"}, "id2": {"name": "vector2"}}

    found = faiss_instance.get(vector_id="id1")
    assert found.id == "id1"
    assert found.score is None
    assert found.payload == {"name": "vector1"}

    missing = faiss_instance.get(vector_id="id3")
    assert missing is None
def test_list(faiss_instance):
    """list() should honor limit and filters, returning a single page of results."""
    # Setup the docstore
    faiss_instance.docstore = {
        "id1": {"name": "vector1", "category": "A"},
        "id2": {"name": "vector2", "category": "B"},
        "id3": {"name": "vector3", "category": "A"},
    }
    # Test listing all vectors
    results = faiss_instance.list()
    # Fix the expected result - the list method returns a list of lists
    assert len(results[0]) == 3
    # Test listing with a limit
    results = faiss_instance.list(limit=2)
    assert len(results[0]) == 2
    # Test listing with filters
    results = faiss_instance.list(filters={"category": "A"})
    assert len(results[0]) == 2
    for result in results[0]:
        assert result.payload["category"] == "A"
def test_col_info(faiss_instance, mock_faiss_index):
    """col_info reflects the collection name plus the live index statistics."""
    mock_faiss_index.ntotal = 5
    mock_faiss_index.d = 128

    info = faiss_instance.col_info()

    expected = {
        "name": "test_collection",
        "count": 5,
        "dimension": 128,
        "distance": "euclidean",
    }
    for key, value in expected.items():
        assert info[key] == value
def test_delete_col(faiss_instance):
    """delete_col should remove the on-disk files and clear in-memory state."""
    # Mock the os.remove function
    with patch("os.remove") as mock_remove:
        with patch("os.path.exists", return_value=True):
            # Call delete_col
            faiss_instance.delete_col()
            # Verify os.remove was called twice (for index and docstore files)
            assert mock_remove.call_count == 2
            # Verify the internal state was reset
            assert faiss_instance.index is None
            assert faiss_instance.docstore == {}
            assert faiss_instance.index_to_id == {}
def test_normalize_L2(faiss_instance, mock_faiss_index):
    """When normalize_L2 is enabled, insert() must pass vectors through faiss.normalize_L2."""
    # Setup a FAISS instance with normalize_L2=True
    faiss_instance.normalize_L2 = True
    # Prepare test data
    vectors = [[0.1, 0.2, 0.3]]
    # Mock numpy array conversion
    with patch("numpy.array", return_value=np.array(vectors, dtype=np.float32)):
        # Mock faiss.normalize_L2
        with patch("faiss.normalize_L2") as mock_normalize:
            # Call insert
            faiss_instance.insert(vectors=vectors, ids=["id1"])
            # Verify faiss.normalize_L2 was called
            mock_normalize.assert_called_once()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_chroma.py | tests/vector_stores/test_chroma.py | from unittest.mock import Mock, patch
import pytest
from mem0.vector_stores.chroma import ChromaDB
@pytest.fixture
def mock_chromadb_client():
    """Patch chromadb.Client for the duration of a test and hand back the patcher."""
    with patch("chromadb.Client") as patched_client:
        yield patched_client
@pytest.fixture
def chromadb_instance(mock_chromadb_client):
    """Build a ChromaDB store whose collection handle is a plain Mock."""
    fake_client = mock_chromadb_client.return_value
    fake_client.get_or_create_collection.return_value = Mock()
    return ChromaDB(collection_name="test_collection", client=fake_client)
def test_insert_vectors(chromadb_instance, mock_chromadb_client):
    """insert() should forward ids, embeddings, and metadatas to collection.add."""
    embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    metadatas = [{"name": "vector1"}, {"name": "vector2"}]
    vector_ids = ["id1", "id2"]

    chromadb_instance.insert(vectors=embeddings, payloads=metadatas, ids=vector_ids)

    chromadb_instance.collection.add.assert_called_once_with(
        ids=vector_ids, embeddings=embeddings, metadatas=metadatas
    )
def test_search_vectors(chromadb_instance, mock_chromadb_client):
    """search() should query the collection and unpack ids/distances/metadatas."""
    # Chroma returns parallel nested lists: one inner list per query embedding.
    mock_result = {
        "ids": [["id1", "id2"]],
        "distances": [[0.1, 0.2]],
        "metadatas": [[{"name": "vector1"}, {"name": "vector2"}]],
    }
    chromadb_instance.collection.query.return_value = mock_result
    vectors = [[0.1, 0.2, 0.3]]
    results = chromadb_instance.search(query="", vectors=vectors, limit=2)
    chromadb_instance.collection.query.assert_called_once_with(query_embeddings=vectors, where=None, n_results=2)
    assert len(results) == 2
    assert results[0].id == "id1"
    # score is taken straight from Chroma's distance value.
    assert results[0].score == 0.1
    assert results[0].payload == {"name": "vector1"}
def test_search_vectors_with_filters(chromadb_instance, mock_chromadb_client):
    """Test search with agent_id and run_id filters."""
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1", "user_id": "alice", "agent_id": "agent1", "run_id": "run1"}]],
    }
    chromadb_instance.collection.query.return_value = mock_result
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = chromadb_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    # Verify that _generate_where_clause was called with the filters:
    # multiple filters must be combined with $and in Chroma's where syntax.
    expected_where = {"$and": [{"user_id": {"$eq": "alice"}}, {"agent_id": {"$eq": "agent1"}}, {"run_id": {"$eq": "run1"}}]}
    chromadb_instance.collection.query.assert_called_once_with(
        query_embeddings=vectors, where=expected_where, n_results=2
    )
    assert len(results) == 1
    assert results[0].id == "id1"
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"
def test_search_vectors_with_single_filter(chromadb_instance, mock_chromadb_client):
    """Test search with single filter (should not use $and)."""
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1", "user_id": "alice"}]],
    }
    chromadb_instance.collection.query.return_value = mock_result
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"user_id": "alice"}
    results = chromadb_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    # Verify that single filter is passed with $eq operator, without an $and wrapper.
    expected_where = {"user_id": {"$eq": "alice"}}
    chromadb_instance.collection.query.assert_called_once_with(
        query_embeddings=vectors, where=expected_where, n_results=2
    )
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
def test_search_vectors_with_no_filters(chromadb_instance, mock_chromadb_client):
    """Test search with no filters."""
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1"}]],
    }
    chromadb_instance.collection.query.return_value = mock_result
    vectors = [[0.1, 0.2, 0.3]]
    results = chromadb_instance.search(query="", vectors=vectors, limit=2, filters=None)
    # No filters → where=None must be passed through unchanged.
    chromadb_instance.collection.query.assert_called_once_with(
        query_embeddings=vectors, where=None, n_results=2
    )
    assert len(results) == 1
def test_delete_vector(chromadb_instance):
    """delete() should pass the id straight through to collection.delete."""
    chromadb_instance.delete(vector_id="id1")
    chromadb_instance.collection.delete.assert_called_once_with(ids="id1")
def test_update_vector(chromadb_instance):
    """update() should forward id, embedding, and metadata to collection.update."""
    replacement_vector = [0.7, 0.8, 0.9]
    replacement_payload = {"name": "updated_vector"}

    chromadb_instance.update(vector_id="id1", vector=replacement_vector, payload=replacement_payload)

    chromadb_instance.collection.update.assert_called_once_with(
        ids="id1", embeddings=replacement_vector, metadatas=replacement_payload
    )
def test_get_vector(chromadb_instance):
    """get() should fetch a single id via collection.get and parse the row."""
    # NOTE(review): this mock includes "distances", which the store reads as
    # score — confirm real collection.get responses actually carry distances.
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1"}]],
    }
    chromadb_instance.collection.get.return_value = mock_result
    result = chromadb_instance.get(vector_id="id1")
    chromadb_instance.collection.get.assert_called_once_with(ids=["id1"])
    assert result.id == "id1"
    assert result.score == 0.1
    assert result.payload == {"name": "vector1"}
def test_list_vectors(chromadb_instance):
    """list() should fetch rows via collection.get and return one page of results."""
    mock_result = {
        "ids": [["id1", "id2"]],
        "distances": [[0.1, 0.2]],
        "metadatas": [[{"name": "vector1"}, {"name": "vector2"}]],
    }
    chromadb_instance.collection.get.return_value = mock_result
    results = chromadb_instance.list(limit=2)
    chromadb_instance.collection.get.assert_called_once_with(where=None, limit=2)
    # list() returns a page-wrapped list; results[0] is the page.
    assert len(results[0]) == 2
    assert results[0][0].id == "id1"
    assert results[0][1].id == "id2"
def test_list_vectors_with_filters(chromadb_instance):
    """Test list with agent_id and run_id filters."""
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1", "user_id": "alice", "agent_id": "agent1", "run_id": "run1"}]],
    }
    chromadb_instance.collection.get.return_value = mock_result
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = chromadb_instance.list(filters=filters, limit=2)
    # Verify that _generate_where_clause was called with the filters:
    # multiple filters are $and-ed, mirroring the search() behavior.
    expected_where = {"$and": [{"user_id": {"$eq": "alice"}}, {"agent_id": {"$eq": "agent1"}}, {"run_id": {"$eq": "run1"}}]}
    chromadb_instance.collection.get.assert_called_once_with(where=expected_where, limit=2)
    assert len(results[0]) == 1
    assert results[0][0].payload["user_id"] == "alice"
    assert results[0][0].payload["agent_id"] == "agent1"
    assert results[0][0].payload["run_id"] == "run1"
def test_list_vectors_with_single_filter(chromadb_instance):
    """Test list with single filter (should not use $and)."""
    mock_result = {
        "ids": [["id1"]],
        "distances": [[0.1]],
        "metadatas": [[{"name": "vector1", "user_id": "alice"}]],
    }
    chromadb_instance.collection.get.return_value = mock_result
    filters = {"user_id": "alice"}
    results = chromadb_instance.list(filters=filters, limit=2)
    # Verify that single filter is passed with $eq operator, no $and wrapper.
    expected_where = {"user_id": {"$eq": "alice"}}
    chromadb_instance.collection.get.assert_called_once_with(where=expected_where, limit=2)
    assert len(results[0]) == 1
    assert results[0][0].payload["user_id"] == "alice"
def test_generate_where_clause_multiple_filters():
    """Multiple filters must be AND-ed together in Chroma's where syntax."""
    where = ChromaDB._generate_where_clause({"user_id": "alice", "agent_id": "agent1", "run_id": "run1"})
    assert where == {
        "$and": [
            {"user_id": {"$eq": "alice"}},
            {"agent_id": {"$eq": "agent1"}},
            {"run_id": {"$eq": "run1"}},
        ]
    }
def test_generate_where_clause_single_filter():
    """A lone filter is emitted directly, without an $and wrapper."""
    where = ChromaDB._generate_where_clause({"user_id": "alice"})
    assert where == {"user_id": {"$eq": "alice"}}
def test_generate_where_clause_no_filters():
    """Missing or empty filters collapse to an empty where clause."""
    for empty_filters in (None, {}):
        assert ChromaDB._generate_where_clause(empty_filters) == {}
def test_generate_where_clause_non_string_values():
    """Ints and bools pass through $eq untouched."""
    where = ChromaDB._generate_where_clause({"user_id": "alice", "count": 5, "active": True})
    assert where == {
        "$and": [
            {"user_id": {"$eq": "alice"}},
            {"count": {"$eq": 5}},
            {"active": {"$eq": True}},
        ]
    }
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_valkey.py | tests/vector_stores/test_valkey.py | import json
from datetime import datetime
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
import pytz
from valkey.exceptions import ResponseError
from mem0.vector_stores.valkey import ValkeyDB
@pytest.fixture
def mock_valkey_client():
    """Create a mock Valkey client."""
    # Patch the module-level factory so ValkeyDB never dials a real server.
    with patch("valkey.from_url") as mock_client:
        # Mock the ft method; a single shared mock lets tests assert search()/info() calls.
        mock_ft = MagicMock()
        mock_client.return_value.ft = MagicMock(return_value=mock_ft)
        mock_client.return_value.execute_command = MagicMock()
        mock_client.return_value.hset = MagicMock()
        mock_client.return_value.hgetall = MagicMock()
        mock_client.return_value.delete = MagicMock()
        # Yield inside the with-block so the patch stays active for the test body.
        yield mock_client.return_value
@pytest.fixture
def valkey_db(mock_valkey_client):
    """Create a ValkeyDB wired to the mocked client instead of a live server."""
    store = ValkeyDB(
        valkey_url="valkey://localhost:6379",
        collection_name="test_collection",
        embedding_model_dims=1536,
    )
    # Swap in the mock after construction so no real connection is exercised.
    store.client = mock_valkey_client
    return store
def test_search_filter_syntax(valkey_db, mock_valkey_client):
    """Test that the search filter syntax is correctly formatted for Valkey."""
    # Mock search results
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Test with user_id filter
    valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
        filters={"user_id": "test_user"},
    )
    # Check that the search was called with the correct filter syntax:
    # a TAG-style "@field:{value}" clause combined with a KNN vector clause.
    args, kwargs = mock_ft.search.call_args
    assert "@user_id:{test_user}" in args[0]
    assert "=>[KNN" in args[0]
    # Test with multiple filters
    valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
        filters={"user_id": "test_user", "agent_id": "test_agent"},
    )
    # Check that the search was called with the correct filter syntax
    args, kwargs = mock_ft.search.call_args
    assert "@user_id:{test_user}" in args[0]
    assert "@agent_id:{test_agent}" in args[0]
    assert "=>[KNN" in args[0]
def test_search_without_filters(valkey_db, mock_valkey_client):
    """Search without filters."""
    # Mock search results
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Test without filters
    results = valkey_db.search(
        query="test query",
        vectors=np.random.rand(1536).tolist(),
        limit=5,
    )
    # Check that the search was called with the correct syntax:
    # no filters means the wildcard "*" prefix before the KNN clause.
    args, kwargs = mock_ft.search.call_args
    assert "*=>[KNN" in args[0]
    # Check that results are processed correctly
    assert len(results) == 1
    assert results[0].id == "test_id"
    assert results[0].payload["hash"] == "test_hash"
    # The stored "memory" field is surfaced to callers as payload["data"].
    assert results[0].payload["data"] == "test_data"
    assert "created_at" in results[0].payload
def test_insert(valkey_db, mock_valkey_client):
    """Test inserting vectors."""
    # Prepare test data
    vectors = [np.random.rand(1536).tolist()]
    payloads = [{"hash": "test_hash", "data": "test_data", "user_id": "test_user"}]
    ids = ["test_id"]
    # Call insert
    valkey_db.insert(vectors=vectors, payloads=payloads, ids=ids)
    # Check that hset was called with the correct arguments:
    # keys are namespaced as "mem0:<collection>:<id>".
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    assert args[0] == "mem0:test_collection:test_id"
    assert "memory_id" in kwargs["mapping"]
    assert kwargs["mapping"]["memory_id"] == "test_id"
    assert kwargs["mapping"]["hash"] == "test_hash"
    # payload["data"] is persisted under the "memory" hash field.
    assert kwargs["mapping"]["memory"] == "test_data"
    assert kwargs["mapping"]["user_id"] == "test_user"
    assert "created_at" in kwargs["mapping"]
    assert "embedding" in kwargs["mapping"]
def test_insert_handles_missing_created_at(valkey_db, mock_valkey_client):
    """Test inserting vectors with missing created_at field."""
    # Prepare test data
    vectors = [np.random.rand(1536).tolist()]
    payloads = [{"hash": "test_hash", "data": "test_data"}]  # No created_at
    ids = ["test_id"]
    # Call insert
    valkey_db.insert(vectors=vectors, payloads=payloads, ids=ids)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    # The store must backfill a timestamp rather than writing a null field.
    assert "created_at" in kwargs["mapping"]  # Should be added automatically
def test_delete(valkey_db, mock_valkey_client):
    """delete() should remove the namespaced hash key for the vector."""
    valkey_db.delete("test_id")
    mock_valkey_client.delete.assert_called_once_with("mem0:test_collection:test_id")
def test_update(valkey_db, mock_valkey_client):
    """Test updating a vector."""
    # Prepare test data
    vector = np.random.rand(1536).tolist()
    payload = {
        "hash": "test_hash",
        "data": "updated_data",
        "created_at": datetime.now(pytz.timezone("UTC")).isoformat(),
        "user_id": "test_user",
    }
    # Call update
    valkey_db.update(vector_id="test_id", vector=vector, payload=payload)
    # Check that hset was called with the correct arguments:
    # update rewrites the same namespaced key with the new mapping.
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    assert args[0] == "mem0:test_collection:test_id"
    assert kwargs["mapping"]["memory_id"] == "test_id"
    assert kwargs["mapping"]["memory"] == "updated_data"
def test_update_handles_missing_created_at(valkey_db, mock_valkey_client):
    """Test updating vectors with missing created_at field."""
    # Prepare test data
    vector = np.random.rand(1536).tolist()
    payload = {"hash": "test_hash", "data": "updated_data"}  # No created_at
    # Call update
    valkey_db.update(vector_id="test_id", vector=vector, payload=payload)
    # Check that hset was called with the correct arguments
    mock_valkey_client.hset.assert_called_once()
    args, kwargs = mock_valkey_client.hset.call_args
    # As with insert, a missing timestamp must be backfilled automatically.
    assert "created_at" in kwargs["mapping"]  # Should be added automatically
def test_get(valkey_db, mock_valkey_client):
    """Test getting a vector."""
    # Mock hgetall to return a vector
    mock_valkey_client.hgetall.return_value = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_data",
        "created_at": str(int(datetime.now().timestamp())),
        "metadata": json.dumps({"key": "value"}),
        "user_id": "test_user",
    }
    # Call get
    result = valkey_db.get("test_id")
    # Check that hgetall was called with the correct namespaced key
    mock_valkey_client.hgetall.assert_called_once_with("mem0:test_collection:test_id")
    # Check the result
    assert result.id == "test_id"
    assert result.payload["hash"] == "test_hash"
    assert result.payload["data"] == "test_data"
    assert "created_at" in result.payload
    # JSON-encoded metadata is flattened into the payload dict.
    assert result.payload["key"] == "value"  # From metadata
    assert result.payload["user_id"] == "test_user"
def test_get_not_found(valkey_db, mock_valkey_client):
    """get() raises KeyError when the hash for the id is empty or absent."""
    mock_valkey_client.hgetall.return_value = {}

    with pytest.raises(KeyError, match="Vector with ID test_id not found"):
        valkey_db.get("test_id")
def test_list_cols(valkey_db, mock_valkey_client):
    """Test listing collections."""
    # Reset the mock to clear previous calls (ValkeyDB construction already
    # issued execute_command calls via the shared fixture).
    mock_valkey_client.execute_command.reset_mock()
    # Mock execute_command to return list of indices
    mock_valkey_client.execute_command.return_value = ["test_collection", "another_collection"]
    # Call list_cols
    result = valkey_db.list_cols()
    # Check that execute_command was called with the correct command
    mock_valkey_client.execute_command.assert_called_with("FT._LIST")
    # Check the result
    assert result == ["test_collection", "another_collection"]
def test_delete_col(valkey_db, mock_valkey_client):
    """Test deleting a collection."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Test successful deletion
    result = valkey_db.delete_col()
    assert result is True
    # Check that execute_command was called with the correct command
    mock_valkey_client.execute_command.assert_called_once_with("FT.DROPINDEX", "test_collection")
    # Test error handling - real errors should still raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Error dropping index")
    with pytest.raises(ResponseError, match="Error dropping index"):
        valkey_db.delete_col()
    # Test idempotent behavior - "Unknown index name" should return False, not raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db.delete_col()
    assert result is False
def test_context_aware_logging(valkey_db, mock_valkey_client):
"""Test that _drop_index handles different log levels correctly."""
# Mock "Unknown index name" error
mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
# Test silent mode - should not log anything (we can't easily test log output, but ensure no exception)
result = valkey_db._drop_index("test_collection", log_level="silent")
assert result is False
# Test info mode - should not raise exception
result = valkey_db._drop_index("test_collection", log_level="info")
assert result is False
# Test default mode - should not raise exception
result = valkey_db._drop_index("test_collection")
assert result is False
def test_col_info(valkey_db, mock_valkey_client):
"""Test getting collection info."""
# Mock ft().info() to return index info
mock_ft = mock_valkey_client.ft.return_value
# Reset the mock to clear previous calls
mock_ft.info.reset_mock()
mock_ft.info.return_value = {"index_name": "test_collection", "num_docs": 100}
# Call col_info
result = valkey_db.col_info()
# Check that ft().info() was called
assert mock_ft.info.called
# Check the result
assert result["index_name"] == "test_collection"
assert result["num_docs"] == 100
def test_create_col(valkey_db, mock_valkey_client):
    """Verify create_col issues an FT.CREATE command with the right options."""
    valkey_db.create_col(name="new_collection", vector_size=768, distance="IP")

    # The index must have been created through execute_command.
    assert mock_valkey_client.execute_command.called
    cmd_args = mock_valkey_client.execute_command.call_args[0]

    # Command name and index name lead the argument list.
    assert cmd_args[0] == "FT.CREATE"
    assert cmd_args[1] == "new_collection"

    # Each option value immediately follows its keyword in the flat arg list.
    assert cmd_args[cmd_args.index("DISTANCE_METRIC") + 1] == "IP"
    assert cmd_args[cmd_args.index("DIM") + 1] == "768"
def test_list(valkey_db, mock_valkey_client):
    """Test listing vectors."""
    # Mock a single search hit carrying every field that list() reads.
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = json.dumps({"key": "value"})
    mock_doc.vector_score = "0.5"  # Add missing vector_score
    mock_results = MagicMock()
    mock_results.docs = [mock_doc]
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.return_value = mock_results
    # Call list
    results = valkey_db.list(filters={"user_id": "test_user"}, limit=10)
    # Check that search was called with the correct arguments
    mock_ft.search.assert_called_once()
    args, kwargs = mock_ft.search.call_args
    # Now expects full search query with KNN part due to dummy vector approach
    assert "@user_id:{test_user}" in args[0]
    assert "=>[KNN" in args[0]
    # Verify the result format: one inner list with one converted result.
    # (Previously these assertions were duplicated verbatim; deduplicated.)
    assert len(results) == 1
    assert len(results[0]) == 1
    assert results[0][0].id == "test_id"
    assert results[0][0].payload["hash"] == "test_hash"
    assert results[0][0].payload["data"] == "test_data"
def test_search_error_handling(valkey_db, mock_valkey_client):
    """Test search error handling when query fails."""
    # Mock search to fail with an error
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.side_effect = ResponseError("Invalid filter expression")
    # Call search should raise the error
    with pytest.raises(ResponseError, match="Invalid filter expression"):
        valkey_db.search(
            query="test query",
            vectors=np.random.rand(1536).tolist(),
            limit=5,
            filters={"user_id": "test_user"},
        )
    # Check that search was called exactly once (i.e. no retry on failure)
    assert mock_ft.search.call_count == 1


def test_drop_index_error_handling(valkey_db, mock_valkey_client):
    """Test error handling when dropping an index."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Test 1: Real error (not "Unknown index name") should raise
    mock_valkey_client.execute_command.side_effect = ResponseError("Error dropping index")
    with pytest.raises(ResponseError, match="Error dropping index"):
        valkey_db._drop_index("test_collection")
    # Test 2: "Unknown index name" with default log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection")
    assert result is False
    # Test 3: "Unknown index name" with silent log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection", log_level="silent")
    assert result is False
    # Test 4: "Unknown index name" with info log_level should return False
    mock_valkey_client.execute_command.side_effect = ResponseError("Unknown index name")
    result = valkey_db._drop_index("test_collection", log_level="info")
    assert result is False
    # Test 5: Successful deletion should return True
    mock_valkey_client.execute_command.side_effect = None  # Reset to success
    result = valkey_db._drop_index("test_collection")
    assert result is True
def test_reset(valkey_db, mock_valkey_client):
    """Test resetting an index."""
    # Mock delete_col and _create_index so reset() is exercised in isolation
    with (
        patch.object(valkey_db, "delete_col", return_value=True) as mock_delete_col,
        patch.object(valkey_db, "_create_index") as mock_create_index,
    ):
        # Call reset
        result = valkey_db.reset()
        # Check that delete_col and _create_index were called
        mock_delete_col.assert_called_once()
        # 1536 is the embedding_model_dims configured in the fixture
        mock_create_index.assert_called_once_with(1536)
        # Check the result
        assert result is True


def test_build_list_query(valkey_db):
    """Test building a list query with and without filters."""
    # Test without filters
    query = valkey_db._build_list_query(None)
    assert query == "*"
    # Test with empty filters
    query = valkey_db._build_list_query({})
    assert query == "*"
    # Test with filters
    query = valkey_db._build_list_query({"user_id": "test_user"})
    assert query == "@user_id:{test_user}"
    # Test with multiple filters
    query = valkey_db._build_list_query({"user_id": "test_user", "agent_id": "test_agent"})
    assert "@user_id:{test_user}" in query
    assert "@agent_id:{test_agent}" in query
def test_process_document_fields(valkey_db):
    """Test processing document fields from hash results."""
    # Create a mock result with all fields
    result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_data",
        "created_at": "1625097600",  # 2021-07-01 00:00:00 UTC
        "updated_at": "1625184000",  # 2021-07-02 00:00:00 UTC
        "user_id": "test_user",
        "agent_id": "test_agent",
        "metadata": json.dumps({"key": "value"}),
    }
    # Process the document fields
    payload, memory_id = valkey_db._process_document_fields(result, "default_id")
    # Check the results
    assert memory_id == "test_id"
    assert payload["hash"] == "test_hash"
    assert payload["data"] == "test_data"  # memory renamed to data
    assert "created_at" in payload
    assert "updated_at" in payload
    assert payload["user_id"] == "test_user"
    assert payload["agent_id"] == "test_agent"
    assert payload["key"] == "value"  # From metadata
    # Test with missing fields: only hash present
    result = {
        # No memory_id
        "hash": "test_hash",
        # No memory
        # No created_at
    }
    # Process the document fields
    payload, memory_id = valkey_db._process_document_fields(result, "default_id")
    # Check the results
    assert memory_id == "default_id"  # Should use default_id
    assert payload["hash"] == "test_hash"
    assert "data" in payload  # Should have default value
    assert "created_at" in payload  # Should have default value
def test_init_connection_error():
    """Test that initialization handles connection errors."""
    # Mock the from_url to raise an exception
    with patch("valkey.from_url") as mock_from_url:
        mock_from_url.side_effect = Exception("Connection failed")
        # Initialize ValkeyDB should raise the exception
        with pytest.raises(Exception, match="Connection failed"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )


def test_build_search_query(valkey_db):
    """Test building search queries with different filter scenarios."""
    # Test with no filters: query is just the KNN clause over all docs
    knn_part = "[KNN 5 @embedding $vec_param AS vector_score]"
    query = valkey_db._build_search_query(knn_part)
    assert query == f"*=>{knn_part}"
    # Test with empty filters
    query = valkey_db._build_search_query(knn_part, {})
    assert query == f"*=>{knn_part}"
    # Test with None values in filters (they are ignored)
    query = valkey_db._build_search_query(knn_part, {"user_id": None})
    assert query == f"*=>{knn_part}"
    # Test with single filter
    query = valkey_db._build_search_query(knn_part, {"user_id": "test_user"})
    assert query == f"@user_id:{{test_user}} =>{knn_part}"
    # Test with multiple filters
    query = valkey_db._build_search_query(knn_part, {"user_id": "test_user", "agent_id": "test_agent"})
    assert "@user_id:{test_user}" in query
    assert "@agent_id:{test_agent}" in query
    assert f"=>{knn_part}" in query
def test_get_error_handling(valkey_db, mock_valkey_client):
    """Test error handling in the get method."""
    # Mock hgetall to raise an exception
    mock_valkey_client.hgetall.side_effect = Exception("Unexpected error")
    # Call get should raise the exception
    with pytest.raises(Exception, match="Unexpected error"):
        valkey_db.get("test_id")


def test_list_error_handling(valkey_db, mock_valkey_client):
    """Test error handling in the list method."""
    # Mock search to raise an exception
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.search.side_effect = Exception("Unexpected error")
    # Call list should return empty result on error (it swallows the failure)
    results = valkey_db.list(filters={"user_id": "test_user"})
    # Check that the result is an empty nested list
    assert results == [[]]


def test_create_index_other_error():
    """Test that initialization handles other errors during index creation."""
    # Mock the execute_command to raise a different error
    with patch("valkey.from_url") as mock_client:
        mock_client.return_value.execute_command.side_effect = ResponseError("Some other error")
        mock_client.return_value.ft = MagicMock()
        # "not found" makes the existence check fall through to index creation
        mock_client.return_value.ft.return_value.info.side_effect = ResponseError("not found")
        # Initialize ValkeyDB should raise the exception
        with pytest.raises(ResponseError, match="Some other error"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )


def test_create_col_error(valkey_db, mock_valkey_client):
    """Test error handling in create_col method."""
    # Mock execute_command to raise an exception
    mock_valkey_client.execute_command.side_effect = Exception("Failed to create index")
    # Call create_col should raise the exception
    with pytest.raises(Exception, match="Failed to create index"):
        valkey_db.create_col(name="new_collection", vector_size=768)


def test_list_cols_error(valkey_db, mock_valkey_client):
    """Test error handling in list_cols method."""
    # Reset the mock to clear previous calls
    mock_valkey_client.execute_command.reset_mock()
    # Mock execute_command to raise an exception
    mock_valkey_client.execute_command.side_effect = Exception("Failed to list indices")
    # Call list_cols should raise the exception
    with pytest.raises(Exception, match="Failed to list indices"):
        valkey_db.list_cols()


def test_col_info_error(valkey_db, mock_valkey_client):
    """Test error handling in col_info method."""
    # Mock ft().info() to raise an exception
    mock_ft = mock_valkey_client.ft.return_value
    mock_ft.info.side_effect = Exception("Failed to get index info")
    # Call col_info should raise the exception
    with pytest.raises(Exception, match="Failed to get index info"):
        valkey_db.col_info()
# Additional tests to improve coverage


def test_invalid_index_type():
    """Test validation of invalid index type."""
    with pytest.raises(ValueError, match="Invalid index_type: invalid. Must be 'hnsw' or 'flat'"):
        ValkeyDB(
            valkey_url="valkey://localhost:6379",
            collection_name="test_collection",
            embedding_model_dims=1536,
            index_type="invalid",
        )


def test_index_existence_check_error(mock_valkey_client):
    """Test error handling when checking index existence."""
    # Mock ft().info() to raise a ResponseError that's not "not found"
    mock_ft = MagicMock()
    mock_ft.info.side_effect = ResponseError("Some other error")
    mock_valkey_client.ft.return_value = mock_ft
    with patch("valkey.from_url", return_value=mock_valkey_client):
        # Unexpected errors from the existence check must propagate
        with pytest.raises(ResponseError):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )


def test_flat_index_creation(mock_valkey_client):
    """Test creation of FLAT index type."""
    mock_ft = MagicMock()
    # Mock the info method to raise ResponseError with "not found" to trigger index creation
    mock_ft.info.side_effect = ResponseError("Index not found")
    mock_valkey_client.ft.return_value = mock_ft
    with patch("valkey.from_url", return_value=mock_valkey_client):
        # Mock the execute_command to avoid the actual exception
        mock_valkey_client.execute_command.return_value = None
        ValkeyDB(
            valkey_url="valkey://localhost:6379",
            collection_name="test_collection",
            embedding_model_dims=1536,
            index_type="flat",
        )
        # Verify that execute_command was called (index creation)
        assert mock_valkey_client.execute_command.called


def test_index_creation_error(mock_valkey_client):
    """Test error handling during index creation."""
    mock_ft = MagicMock()
    mock_ft.info.side_effect = ResponseError("Unknown index name")  # Index doesn't exist
    mock_valkey_client.ft.return_value = mock_ft
    mock_valkey_client.execute_command.side_effect = Exception("Failed to create index")
    with patch("valkey.from_url", return_value=mock_valkey_client):
        with pytest.raises(Exception, match="Failed to create index"):
            ValkeyDB(
                valkey_url="valkey://localhost:6379",
                collection_name="test_collection",
                embedding_model_dims=1536,
            )
def test_insert_missing_required_field(valkey_db, mock_valkey_client):
    """Test error handling when inserting vector with missing required field."""
    # Mock hset to raise KeyError (missing required field)
    mock_valkey_client.hset.side_effect = KeyError("missing_field")
    # This should not raise an exception but should log the error
    valkey_db.insert(vectors=[np.random.rand(1536).tolist()], payloads=[{"memory": "test"}], ids=["test_id"])


def test_insert_general_error(valkey_db, mock_valkey_client):
    """Test error handling for general exceptions during insert."""
    # Mock hset to raise a general exception; unlike KeyError this propagates
    mock_valkey_client.hset.side_effect = Exception("Database error")
    with pytest.raises(Exception, match="Database error"):
        valkey_db.insert(vectors=[np.random.rand(1536).tolist()], payloads=[{"memory": "test"}], ids=["test_id"])


def test_search_with_invalid_metadata(valkey_db, mock_valkey_client):
    """Test search with invalid JSON metadata."""
    # Mock search results with invalid JSON metadata
    mock_doc = MagicMock()
    mock_doc.memory_id = "test_id"
    mock_doc.hash = "test_hash"
    mock_doc.memory = "test_data"
    mock_doc.created_at = str(int(datetime.now().timestamp()))
    mock_doc.metadata = "invalid_json"  # Invalid JSON
    mock_doc.vector_score = "0.5"
    mock_result = MagicMock()
    mock_result.docs = [mock_doc]
    mock_valkey_client.ft.return_value.search.return_value = mock_result
    # Should handle invalid JSON gracefully
    results = valkey_db.search(query="test query", vectors=np.random.rand(1536).tolist(), limit=5)
    assert len(results) == 1


def test_search_with_hnsw_ef_runtime(valkey_db, mock_valkey_client):
    """Test search with HNSW ef_runtime parameter."""
    valkey_db.index_type = "hnsw"
    valkey_db.hnsw_ef_runtime = 20
    mock_result = MagicMock()
    mock_result.docs = []
    mock_valkey_client.ft.return_value.search.return_value = mock_result
    valkey_db.search(query="test query", vectors=np.random.rand(1536).tolist(), limit=5)
    # Verify the search was called (smoke check only; the EF_RUNTIME clause
    # itself is not asserted here)
    assert mock_valkey_client.ft.return_value.search.called


def test_delete_error(valkey_db, mock_valkey_client):
    """Test error handling during vector deletion."""
    mock_valkey_client.delete.side_effect = Exception("Delete failed")
    with pytest.raises(Exception, match="Delete failed"):
        valkey_db.delete("test_id")


def test_update_missing_required_field(valkey_db, mock_valkey_client):
    """Test error handling when updating vector with missing required field."""
    mock_valkey_client.hset.side_effect = KeyError("missing_field")
    # This should not raise an exception but should log the error
    valkey_db.update(vector_id="test_id", vector=np.random.rand(1536).tolist(), payload={"memory": "updated"})


def test_update_general_error(valkey_db, mock_valkey_client):
    """Test error handling for general exceptions during update."""
    mock_valkey_client.hset.side_effect = Exception("Update failed")
    with pytest.raises(Exception, match="Update failed"):
        valkey_db.update(vector_id="test_id", vector=np.random.rand(1536).tolist(), payload={"memory": "updated"})
def test_get_with_binary_data_and_unicode_error(valkey_db, mock_valkey_client):
    """Test get method with binary data that fails UTF-8 decoding."""
    # Mock result with binary data that can't be decoded
    mock_result = {
        "memory_id": "test_id",
        "hash": b"\xff\xfe",  # Invalid UTF-8 bytes
        "memory": "test_memory",
        "created_at": "1234567890",
        "updated_at": "invalid_timestamp",
        "metadata": "{}",
        "embedding": b"binary_embedding_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle binary data gracefully
    assert result.id == "test_id"
    assert result.payload["data"] == "test_memory"


def test_get_with_invalid_timestamps(valkey_db, mock_valkey_client):
    """Test get method with invalid timestamp values."""
    mock_result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_memory",
        "created_at": "invalid_timestamp",
        "updated_at": "also_invalid",
        "metadata": "{}",
        "embedding": b"binary_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle invalid timestamps gracefully and still expose created_at
    assert result.id == "test_id"
    assert "created_at" in result.payload


def test_get_with_invalid_metadata_json(valkey_db, mock_valkey_client):
    """Test get method with invalid JSON metadata."""
    mock_result = {
        "memory_id": "test_id",
        "hash": "test_hash",
        "memory": "test_memory",
        "created_at": "1234567890",
        "updated_at": "1234567890",
        "metadata": "invalid_json{",  # Invalid JSON
        "embedding": b"binary_data",
    }
    mock_valkey_client.hgetall.return_value = mock_result
    result = valkey_db.get("test_id")
    # Should handle invalid JSON gracefully
    assert result.id == "test_id"


def test_list_with_missing_fields_and_defaults(valkey_db, mock_valkey_client):
    """Test list method with documents missing various fields."""
    # Mock search results with missing fields but valid timestamps
    mock_doc1 = MagicMock()
    mock_doc1.memory_id = "fallback_id"
    mock_doc1.hash = "test_hash"  # Provide valid hash
    mock_doc1.memory = "test_memory"  # Provide valid memory
    mock_doc1.created_at = str(int(datetime.now().timestamp()))  # Valid timestamp
    mock_doc1.updated_at = str(int(datetime.now().timestamp()))  # Valid timestamp
    mock_doc1.metadata = json.dumps({"key": "value"})  # Valid JSON
    mock_doc1.vector_score = "0.5"
    mock_result = MagicMock()
    mock_result.docs = [mock_doc1]
    mock_valkey_client.ft.return_value.search.return_value = mock_result
    results = valkey_db.list()
    # Should handle the search-based list approach
    assert len(results) == 1
    inner_results = results[0]
    assert len(inner_results) == 1
    result = inner_results[0]
    assert result.id == "fallback_id"
    assert "hash" in result.payload
    assert "data" in result.payload  # memory is renamed to data
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_vertex_ai_vector_search.py | tests/vector_stores/test_vertex_ai_vector_search.py | from unittest.mock import Mock, patch
import pytest
from google.api_core import exceptions
from google.cloud.aiplatform.matching_engine.matching_engine_index_endpoint import (
Namespace,
)
from mem0.configs.vector_stores.vertex_ai_vector_search import (
GoogleMatchingEngineConfig,
)
from mem0.vector_stores.vertex_ai_vector_search import GoogleMatchingEngine
@pytest.fixture
def mock_vertex_ai():
    """Patch the aiplatform SDK pieces used by GoogleMatchingEngine.

    Yields a dict with the mocked index/endpoint instances, the patched
    classes, and the patched aiplatform.init.
    """
    with (
        patch("google.cloud.aiplatform.MatchingEngineIndex") as mock_index,
        patch("google.cloud.aiplatform.MatchingEngineIndexEndpoint") as mock_endpoint,
        patch("google.cloud.aiplatform.init") as mock_init,
    ):
        mock_index_instance = Mock()
        mock_endpoint_instance = Mock()
        yield {
            "index": mock_index_instance,
            "endpoint": mock_endpoint_instance,
            "init": mock_init,
            "index_class": mock_index,
            "endpoint_class": mock_endpoint,
        }


@pytest.fixture
def config():
    """Return a fully-populated GoogleMatchingEngineConfig for the tests."""
    return GoogleMatchingEngineConfig(
        project_id="test-project",
        project_number="123456789",
        region="us-central1",
        endpoint_id="test-endpoint",
        index_id="test-index",
        deployment_index_id="test-deployment",
        collection_name="test-collection",
        vector_search_api_endpoint="test.vertexai.goog",
    )


@pytest.fixture
def vector_store(config, mock_vertex_ai):
    """Build a GoogleMatchingEngine wired to the mocked SDK classes."""
    mock_vertex_ai["index_class"].return_value = mock_vertex_ai["index"]
    mock_vertex_ai["endpoint_class"].return_value = mock_vertex_ai["endpoint"]
    return GoogleMatchingEngine(**config.model_dump())
def test_initialization(vector_store, mock_vertex_ai, config):
    """GoogleMatchingEngine should init the SDK and resolve the index path."""
    # aiplatform.init must receive the configured project and region.
    mock_vertex_ai["init"].assert_called_once_with(project=config.project_id, location=config.region)
    # The index is looked up by its fully-qualified resource name.
    index_path = (
        f"projects/{config.project_number}/locations/{config.region}/indexes/{config.index_id}"
    )
    mock_vertex_ai["index_class"].assert_called_once_with(index_name=index_path)
def test_insert_vectors(vector_store, mock_vertex_ai):
    """Test inserting vectors with payloads"""
    vectors = [[0.1, 0.2, 0.3]]
    payloads = [{"name": "test", "user_id": "user1"}]
    ids = ["test-id"]
    vector_store.insert(vectors=vectors, payloads=payloads, ids=ids)
    mock_vertex_ai["index"].upsert_datapoints.assert_called_once()
    call_args = mock_vertex_ai["index"].upsert_datapoints.call_args[1]
    assert len(call_args["datapoints"]) == 1
    # The datapoint is an SDK object, so inspect its string form rather than
    # asserting on exact attributes.
    datapoint_str = str(call_args["datapoints"][0])
    assert "test-id" in datapoint_str
    assert "0.1" in datapoint_str and "0.2" in datapoint_str and "0.3" in datapoint_str
def test_search_vectors(vector_store, mock_vertex_ai):
    """Test searching vectors with filters"""
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"user_id": "test_user"}
    # Build a fake neighbor whose datapoint carries a matching restrict.
    mock_datapoint = Mock()
    mock_datapoint.datapoint_id = "test-id"
    mock_datapoint.feature_vector = vectors
    # Restrict mock covers both attribute spellings (namespace/allow_list and
    # name/allow_tokens) used across SDK versions.
    mock_restrict = Mock()
    mock_restrict.namespace = "user_id"
    mock_restrict.allow_list = ["test_user"]
    mock_restrict.name = "user_id"
    mock_restrict.allow_tokens = ["test_user"]
    mock_datapoint.restricts = [mock_restrict]
    mock_neighbor = Mock()
    mock_neighbor.id = "test-id"
    mock_neighbor.distance = 0.1
    mock_neighbor.datapoint = mock_datapoint
    mock_neighbor.restricts = [mock_restrict]
    # find_neighbors returns one list of neighbors per query vector.
    mock_vertex_ai["endpoint"].find_neighbors.return_value = [[mock_neighbor]]
    results = vector_store.search(query="", vectors=vectors, filters=filters, limit=1)
    mock_vertex_ai["endpoint"].find_neighbors.assert_called_once_with(
        deployed_index_id=vector_store.deployment_index_id,
        queries=[vectors],
        num_neighbors=1,
        filter=[Namespace("user_id", ["test_user"], [])],
        return_full_datapoint=True,
    )
    assert len(results) == 1
    assert results[0].id == "test-id"
    assert results[0].score == 0.1
    # The restricts are converted back into the payload dict.
    assert results[0].payload == {"user_id": "test_user"}
def test_delete(vector_store, mock_vertex_ai):
    """Test deleting vectors"""
    vector_id = "test-id"
    remove_mock = Mock()
    # Spy on delete itself while stubbing out the SDK call it delegates to.
    with patch.object(GoogleMatchingEngine, "delete", wraps=vector_store.delete) as delete_spy:
        with patch.object(vector_store.index, "remove_datapoints", remove_mock):
            vector_store.delete(ids=[vector_id])
            delete_spy.assert_called_once_with(ids=[vector_id])
            remove_mock.assert_called_once_with(datapoint_ids=[vector_id])
def test_error_handling(vector_store, mock_vertex_ai):
    """Test error handling during insert operations."""
    mock_vertex_ai["index"].upsert_datapoints.side_effect = exceptions.InvalidArgument("Invalid request")
    # Assert the specific exception type and message directly instead of
    # catching a broad Exception and checking isinstance afterwards.
    with pytest.raises(exceptions.InvalidArgument, match="Invalid request"):
        vector_store.insert(vectors=[[0.1, 0.2, 0.3]], payloads=[{"name": "test"}], ids=["test-id"])
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_mongodb.py | tests/vector_stores/test_mongodb.py | from unittest.mock import MagicMock, patch
import pytest
from mem0.vector_stores.mongodb import MongoDB
@pytest.fixture
@patch("mem0.vector_stores.mongodb.MongoClient")
def mongo_vector_fixture(mock_mongo_client):
    """Create a MongoDB vector store backed by a mocked MongoClient.

    Returns a (mongo_vector, mock_collection, mock_db) tuple so tests can
    assert on the underlying pymongo calls.
    """
    mock_client = mock_mongo_client.return_value
    mock_db = mock_client["test_db"]
    mock_collection = mock_db["test_collection"]
    mock_collection.list_search_indexes.return_value = []
    mock_collection.aggregate.return_value = []
    mock_collection.find_one.return_value = None
    # Create a proper mock cursor (find().limit() must chain)
    mock_cursor = MagicMock()
    mock_cursor.limit.return_value = mock_cursor
    mock_collection.find.return_value = mock_cursor
    mock_db.list_collection_names.return_value = []
    mongo_vector = MongoDB(
        db_name="test_db",
        collection_name="test_collection",
        embedding_model_dims=1536,
        mongo_uri="mongodb://username:password@localhost:27017",
    )
    return mongo_vector, mock_collection, mock_db
def test_initalize_create_col(mongo_vector_fixture):
    """Verify the constructor creates the collection and its search index.

    NOTE(review): function name has a typo ("initalize"); kept as-is to
    avoid churning test IDs.
    """
    mongo_vector, mock_collection, mock_db = mongo_vector_fixture
    assert mongo_vector.collection_name == "test_collection"
    assert mongo_vector.embedding_model_dims == 1536
    assert mongo_vector.db_name == "test_db"
    # Verify create_col being called: the collection is materialized by
    # inserting and immediately removing a placeholder document.
    mock_db.list_collection_names.assert_called_once()
    mock_collection.insert_one.assert_called_once_with({"_id": 0, "placeholder": True})
    mock_collection.delete_one.assert_called_once_with({"_id": 0})
    assert mongo_vector.index_name == "test_collection_vector_index"
    mock_collection.list_search_indexes.assert_called_once_with(name="test_collection_vector_index")
    mock_collection.create_search_index.assert_called_once()
    args, _ = mock_collection.create_search_index.call_args
    search_index_model = args[0].document
    assert search_index_model == {
        "name": "test_collection_vector_index",
        "definition": {
            "mappings": {
                "dynamic": False,
                "fields": {
                    "embedding": {
                        "type": "knnVector",
                        "dimensions": 1536,
                        "similarity": "cosine",
                    }
                },
            }
        },
    }
    assert mongo_vector.collection == mock_collection
def test_insert(mongo_vector_fixture):
    """insert() should write one record per vector via insert_many."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    ids = ["id1", "id2"]
    vectors = [[0.1] * 1536, [0.2] * 1536]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]

    mongo_vector.insert(vectors, payloads, ids)

    # Each record pairs an id with its embedding and payload.
    expected = [
        {"_id": i, "embedding": v, "payload": p}
        for i, v, p in zip(ids, vectors, payloads)
    ]
    mock_collection.insert_many.assert_called_once_with(expected)
def test_search(mongo_vector_fixture):
    """search() should run a $vectorSearch aggregation and map the hits."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"key": "value1"}},
        {"_id": "id2", "score": 0.8, "payload": {"key": "value2"}},
    ]
    # Report the index as existing so search proceeds
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    results = mongo_vector.search("query_str", query_vector, limit=2)
    mock_collection.list_search_indexes.assert_called_with(name="test_collection_vector_index")
    mock_collection.aggregate.assert_called_once_with(
        [
            {
                "$vectorSearch": {
                    "index": "test_collection_vector_index",
                    "limit": 2,
                    "numCandidates": 2,
                    "queryVector": query_vector,
                    "path": "embedding",
                },
            },
            {"$set": {"score": {"$meta": "vectorSearchScore"}}},
            {"$project": {"embedding": 0}},
        ]
    )
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].score == 0.9
    assert results[0].payload == {"key": "value1"}
def test_search_with_filters(mongo_vector_fixture):
    """Test search with agent_id and run_id filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = mongo_vector.search("query_str", query_vector, limit=2, filters=filters)
    # Verify that the aggregation pipeline includes the filter stage
    mock_collection.aggregate.assert_called_once()
    pipeline = mock_collection.aggregate.call_args[0][0]
    # Check that the pipeline has the expected stages
    assert len(pipeline) == 4  # vectorSearch, match, set, project
    # Check that the match stage is present with the correct filters
    # (each filter key is prefixed with "payload." and AND-combined)
    match_stage = pipeline[1]
    assert "$match" in match_stage
    assert match_stage["$match"]["$and"] == [
        {"payload.user_id": "alice"},
        {"payload.agent_id": "agent1"},
        {"payload.run_id": "run1"}
    ]
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"


def test_search_with_single_filter(mongo_vector_fixture):
    """Test search with single filter."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"user_id": "alice"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    filters = {"user_id": "alice"}
    results = mongo_vector.search("query_str", query_vector, limit=2, filters=filters)
    # Verify that the aggregation pipeline includes the filter stage
    mock_collection.aggregate.assert_called_once()
    pipeline = mock_collection.aggregate.call_args[0][0]
    # Check that the match stage is present with the correct filter
    match_stage = pipeline[1]
    assert "$match" in match_stage
    assert match_stage["$match"]["$and"] == [{"payload.user_id": "alice"}]
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"


def test_search_with_no_filters(mongo_vector_fixture):
    """Test search with no filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    query_vector = [0.1] * 1536
    mock_collection.aggregate.return_value = [
        {"_id": "id1", "score": 0.9, "payload": {"key": "value1"}},
    ]
    mock_collection.list_search_indexes.return_value = ["test_collection_vector_index"]
    results = mongo_vector.search("query_str", query_vector, limit=2, filters=None)
    # Verify that the aggregation pipeline does not include the filter stage
    mock_collection.aggregate.assert_called_once()
    pipeline = mock_collection.aggregate.call_args[0][0]
    # Check that the pipeline has only the expected stages (no match stage)
    assert len(pipeline) == 3  # vectorSearch, set, project
    assert len(results) == 1
def test_delete(mongo_vector_fixture):
mongo_vector, mock_collection, _ = mongo_vector_fixture
vector_id = "id1"
mock_collection.delete_one.return_value = MagicMock(deleted_count=1)
# Reset the mock to clear calls from fixture setup
mock_collection.delete_one.reset_mock()
mongo_vector.delete(vector_id=vector_id)
mock_collection.delete_one.assert_called_once_with({"_id": vector_id})
def test_update(mongo_vector_fixture):
mongo_vector, mock_collection, _ = mongo_vector_fixture
vector_id = "id1"
updated_vector = [0.3] * 1536
updated_payload = {"name": "updated_vector"}
mock_collection.update_one.return_value = MagicMock(matched_count=1)
mongo_vector.update(vector_id=vector_id, vector=updated_vector, payload=updated_payload)
mock_collection.update_one.assert_called_once_with(
{"_id": vector_id}, {"$set": {"embedding": updated_vector, "payload": updated_payload}}
)
def test_get(mongo_vector_fixture):
mongo_vector, mock_collection, _ = mongo_vector_fixture
vector_id = "id1"
mock_collection.find_one.return_value = {"_id": vector_id, "payload": {"key": "value"}}
result = mongo_vector.get(vector_id=vector_id)
mock_collection.find_one.assert_called_once_with({"_id": vector_id})
assert result.id == vector_id
assert result.payload == {"key": "value"}
def test_list_cols(mongo_vector_fixture):
    """list_cols() proxies straight to the database's collection listing."""
    store, _, database = mongo_vector_fixture
    database.list_collection_names.return_value = ["collection1", "collection2"]
    # Drop calls recorded during fixture construction.
    database.list_collection_names.reset_mock()
    assert store.list_cols() == ["collection1", "collection2"]
    database.list_collection_names.assert_called_once()
def test_delete_col(mongo_vector_fixture):
    """delete_col() drops the backing Mongo collection."""
    store, collection, _ = mongo_vector_fixture
    store.delete_col()
    collection.drop.assert_called_once()
def test_col_info(mongo_vector_fixture):
    """col_info() reports the collection name plus collstats count and size."""
    store, _, database = mongo_vector_fixture
    database.command.return_value = {"count": 10, "size": 1024}
    info = store.col_info()
    database.command.assert_called_once_with("collstats", "test_collection")
    assert (info["name"], info["count"], info["size"]) == ("test_collection", 10, 1024)
def test_list(mongo_vector_fixture):
    """List without filters returns every mocked document, capped by limit."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Mock the cursor to return the expected data
    mock_cursor = mock_collection.find.return_value
    mock_cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"key": "value1"}},
        {"_id": "id2", "payload": {"key": "value2"}},
    ]
    results = mongo_vector.list(limit=2)
    # No filters -> an empty find() query, with the limit applied on the cursor.
    mock_collection.find.assert_called_once_with({})
    mock_cursor.limit.assert_called_once_with(2)
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].payload == {"key": "value1"}
def test_list_with_filters(mongo_vector_fixture):
    """Test list with agent_id and run_id filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Mock the cursor to return the expected data
    mock_cursor = mock_collection.find.return_value
    mock_cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}},
    ]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = mongo_vector.list(filters=filters, limit=2)
    # Verify that the find method was called with the correct query
    # Multiple filters are AND-ed together over nested payload.* fields.
    expected_query = {
        "$and": [
            {"payload.user_id": "alice"},
            {"payload.agent_id": "agent1"},
            {"payload.run_id": "run1"}
        ]
    }
    mock_collection.find.assert_called_once_with(expected_query)
    mock_cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"
def test_list_with_single_filter(mongo_vector_fixture):
    """Test list with single filter."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Mock the cursor to return the expected data
    mock_cursor = mock_collection.find.return_value
    mock_cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"user_id": "alice"}},
    ]
    filters = {"user_id": "alice"}
    results = mongo_vector.list(filters=filters, limit=2)
    # Verify that the find method was called with the correct query
    # Even a single filter is wrapped in a one-element $and clause.
    expected_query = {
        "$and": [
            {"payload.user_id": "alice"}
        ]
    }
    mock_collection.find.assert_called_once_with(expected_query)
    mock_cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
def test_list_with_no_filters(mongo_vector_fixture):
    """Test list with no filters."""
    mongo_vector, mock_collection, _ = mongo_vector_fixture
    # Mock the cursor to return the expected data
    mock_cursor = mock_collection.find.return_value
    mock_cursor.__iter__.return_value = [
        {"_id": "id1", "payload": {"key": "value1"}},
    ]
    results = mongo_vector.list(filters=None, limit=2)
    # Verify that the find method was called with empty query
    mock_collection.find.assert_called_once_with({})
    mock_cursor.limit.assert_called_once_with(2)
    assert len(results) == 1
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_supabase.py | tests/vector_stores/test_supabase.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.vector_stores.supabase import IndexMeasure, IndexMethod
from mem0.vector_stores.supabase import Supabase
@pytest.fixture
def mock_vecs_client():
    """Patch vecs.create_client so no real Postgres connection is made."""
    with patch("vecs.create_client") as mock_client:
        yield mock_client
@pytest.fixture
def mock_collection():
    """A vecs collection stub exposing the attributes that col_info() reads."""
    collection = Mock()
    collection.name = "test_collection"
    collection.vectors = 100
    collection.dimension = 1536
    collection.index_method = "hnsw"
    collection.distance_metric = "cosine_distance"
    # describe() returns the collection itself so attribute reads keep working.
    collection.describe.return_value = collection
    return collection
@pytest.fixture
def supabase_instance(mock_vecs_client, mock_collection):
    """A Supabase store wired to the mocked vecs client and collection."""
    # Set up the mock client to return our mock collection
    mock_vecs_client.return_value.get_or_create_collection.return_value = mock_collection
    mock_vecs_client.return_value.list_collections.return_value = ["test_collection"]
    instance = Supabase(
        connection_string="postgresql://user:password@localhost:5432/test",
        collection_name="test_collection",
        embedding_model_dims=1536,
        index_method=IndexMethod.HNSW,
        index_measure=IndexMeasure.COSINE,
    )
    # Manually set the collection attribute since we're mocking the initialization
    instance.collection = mock_collection
    return instance
def test_create_col(supabase_instance, mock_vecs_client, mock_collection):
    """create_col() provisions the collection and builds the configured index."""
    supabase_instance.create_col(1536)
    client = mock_vecs_client.return_value
    client.get_or_create_collection.assert_called_with(name="test_collection", dimension=1536)
    mock_collection.create_index.assert_called_with(method="hnsw", measure="cosine_distance")
def test_insert_vectors(supabase_instance, mock_collection):
    """insert() zips ids/vectors/payloads into (id, vector, metadata) records."""
    ids = ["id1", "id2"]
    embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    metadata = [{"name": "vector1"}, {"name": "vector2"}]
    supabase_instance.insert(vectors=embeddings, payloads=metadata, ids=ids)
    mock_collection.upsert.assert_called_once_with(
        [("id1", [0.1, 0.2, 0.3], {"name": "vector1"}), ("id2", [0.4, 0.5, 0.6], {"name": "vector2"})]
    )
def test_search_vectors(supabase_instance, mock_collection):
    """search() rewrites plain filters into vecs `$eq` form before querying."""
    mock_results = [("id1", 0.9, {"name": "vector1"}), ("id2", 0.8, {"name": "vector2"})]
    mock_collection.query.return_value = mock_results
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"category": "test"}
    results = supabase_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    mock_collection.query.assert_called_once_with(
        data=vectors, limit=2, filters={"category": {"$eq": "test"}}, include_metadata=True, include_value=True
    )
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].score == 0.9
    assert results[0].payload == {"name": "vector1"}
def test_delete_vector(supabase_instance, mock_collection):
    """delete() forwards the id as a single-element tuple list to vecs."""
    supabase_instance.delete(vector_id="id1")
    mock_collection.delete.assert_called_once_with([("id1",)])
def test_update_vector(supabase_instance, mock_collection):
    """update() is implemented as an upsert of one (id, vector, payload) record."""
    replacement = [0.7, 0.8, 0.9]
    payload = {"name": "updated_vector"}
    supabase_instance.update(vector_id="id1", vector=replacement, payload=payload)
    mock_collection.upsert.assert_called_once_with([("id1", replacement, payload)])
def test_get_vector(supabase_instance, mock_collection):
    """get() fetches one record by id and maps it to an id/payload result."""
    # Create a Mock object to represent the record
    mock_record = Mock()
    mock_record.id = "id1"
    mock_record.metadata = {"name": "vector1"}
    mock_record.values = [0.1, 0.2, 0.3]
    # Set the fetch return value to a list containing our mock record
    mock_collection.fetch.return_value = [mock_record]
    result = supabase_instance.get(vector_id="id1")
    mock_collection.fetch.assert_called_once_with([("id1",)])
    assert result.id == "id1"
    assert result.payload == {"name": "vector1"}
def test_list_vectors(supabase_instance, mock_collection):
    """list() queries for matching ids, then fetches the full records.

    The return value is nested one level (results[0] is the record list).
    """
    mock_query_results = [("id1", 0.9, {}), ("id2", 0.8, {})]
    mock_fetch_results = [("id1", [0.1, 0.2, 0.3], {"name": "vector1"}), ("id2", [0.4, 0.5, 0.6], {"name": "vector2"})]
    mock_collection.query.return_value = mock_query_results
    mock_collection.fetch.return_value = mock_fetch_results
    results = supabase_instance.list(limit=2, filters={"category": "test"})
    assert len(results[0]) == 2
    assert results[0][0].id == "id1"
    assert results[0][0].payload == {"name": "vector1"}
    assert results[0][1].id == "id2"
    assert results[0][1].payload == {"name": "vector2"}
def test_col_info(supabase_instance, mock_collection):
    """col_info() flattens the collection description into a plain dict."""
    expected = {
        "name": "test_collection",
        "count": 100,
        "dimension": 1536,
        "index": {"method": "hnsw", "metric": "cosine_distance"},
    }
    assert supabase_instance.col_info() == expected
def test_preprocess_filters(supabase_instance):
    """_preprocess_filters maps {k: v} to vecs $eq clauses, AND-ing multiples."""
    # Test single filter
    single_filter = {"category": "test"}
    assert supabase_instance._preprocess_filters(single_filter) == {"category": {"$eq": "test"}}
    # Test multiple filters
    multi_filter = {"category": "test", "type": "document"}
    assert supabase_instance._preprocess_filters(multi_filter) == {
        "$and": [{"category": {"$eq": "test"}}, {"type": {"$eq": "document"}}]
    }
    # Test None filters
    assert supabase_instance._preprocess_filters(None) is None
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_langchain_vector_store.py | tests/vector_stores/test_langchain_vector_store.py | from unittest.mock import Mock, patch
import pytest
from langchain_community.vectorstores import VectorStore
from mem0.vector_stores.langchain import Langchain
@pytest.fixture
def mock_langchain_client():
    """Patch the VectorStore symbol.

    NOTE(review): langchain_instance builds its own Mock(spec=VectorStore),
    so this patch appears unused by the tests — confirm before removing.
    """
    with patch("langchain_community.vectorstores.VectorStore") as mock_client:
        yield mock_client
@pytest.fixture
def langchain_instance(mock_langchain_client):
    """A Langchain store wrapping a spec'd Mock VectorStore client."""
    mock_client = Mock(spec=VectorStore)
    return Langchain(client=mock_client, collection_name="test_collection")
def test_insert_vectors(langchain_instance):
    """insert() prefers add_embeddings; falls back to add_texts using payload 'data'."""
    # Test data
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [{"data": "text1", "name": "vector1"}, {"data": "text2", "name": "vector2"}]
    ids = ["id1", "id2"]
    # Test with add_embeddings method
    langchain_instance.client.add_embeddings = Mock()
    langchain_instance.insert(vectors=vectors, payloads=payloads, ids=ids)
    langchain_instance.client.add_embeddings.assert_called_once_with(embeddings=vectors, metadatas=payloads, ids=ids)
    # Test with add_texts method
    # delattr makes hasattr(client, "add_embeddings") False, forcing the fallback.
    delattr(langchain_instance.client, "add_embeddings")  # Remove attribute completely
    langchain_instance.client.add_texts = Mock()
    langchain_instance.insert(vectors=vectors, payloads=payloads, ids=ids)
    langchain_instance.client.add_texts.assert_called_once_with(texts=["text1", "text2"], metadatas=payloads, ids=ids)
    # Test with empty payloads
    langchain_instance.client.add_texts.reset_mock()
    langchain_instance.insert(vectors=vectors, payloads=None, ids=ids)
    langchain_instance.client.add_texts.assert_called_once_with(texts=["", ""], metadatas=None, ids=ids)
def test_search_vectors(langchain_instance):
    """search() maps returned docs to id/payload; filters are forwarded when given."""
    # Mock search results
    mock_docs = [Mock(metadata={"name": "vector1"}, id="id1"), Mock(metadata={"name": "vector2"}, id="id2")]
    langchain_instance.client.similarity_search_by_vector.return_value = mock_docs
    # Test search without filters
    vectors = [[0.1, 0.2, 0.3]]
    results = langchain_instance.search(query="", vectors=vectors, limit=2)
    langchain_instance.client.similarity_search_by_vector.assert_called_once_with(embedding=vectors, k=2)
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].payload == {"name": "vector1"}
    assert results[1].id == "id2"
    assert results[1].payload == {"name": "vector2"}
    # Test search with filters
    filters = {"name": "vector1"}
    langchain_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    langchain_instance.client.similarity_search_by_vector.assert_called_with(embedding=vectors, k=2, filter=filters)
def test_search_vectors_with_agent_id_run_id_filters(langchain_instance):
    """Test search with agent_id and run_id filters."""
    # Mock search results
    # The mock returns both docs; this test only verifies filter pass-through
    # and payload mapping, not actual filtering.
    mock_docs = [
        Mock(metadata={"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}, id="id1"),
        Mock(metadata={"user_id": "bob", "agent_id": "agent2", "run_id": "run2"}, id="id2")
    ]
    langchain_instance.client.similarity_search_by_vector.return_value = mock_docs
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = langchain_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    # Verify that filters were passed to the underlying vector store
    langchain_instance.client.similarity_search_by_vector.assert_called_once_with(
        embedding=vectors, k=2, filter=filters
    )
    assert len(results) == 2
    assert results[0].payload["user_id"] == "alice"
    assert results[0].payload["agent_id"] == "agent1"
    assert results[0].payload["run_id"] == "run1"
def test_search_vectors_with_single_filter(langchain_instance):
    """Test search with single filter."""
    # Mock search results
    mock_docs = [Mock(metadata={"user_id": "alice"}, id="id1")]
    langchain_instance.client.similarity_search_by_vector.return_value = mock_docs
    vectors = [[0.1, 0.2, 0.3]]
    filters = {"user_id": "alice"}
    results = langchain_instance.search(query="", vectors=vectors, limit=2, filters=filters)
    # Verify that filters were passed to the underlying vector store
    # Filters are forwarded verbatim as the `filter` kwarg.
    langchain_instance.client.similarity_search_by_vector.assert_called_once_with(
        embedding=vectors, k=2, filter=filters
    )
    assert len(results) == 1
    assert results[0].payload["user_id"] == "alice"
def test_search_vectors_with_no_filters(langchain_instance):
    """Test search with no filters."""
    # Mock search results
    mock_docs = [Mock(metadata={"name": "vector1"}, id="id1")]
    langchain_instance.client.similarity_search_by_vector.return_value = mock_docs
    vectors = [[0.1, 0.2, 0.3]]
    results = langchain_instance.search(query="", vectors=vectors, limit=2, filters=None)
    # Verify that no filters were passed to the underlying vector store
    # With filters=None the `filter` kwarg is omitted entirely.
    langchain_instance.client.similarity_search_by_vector.assert_called_once_with(
        embedding=vectors, k=2
    )
    assert len(results) == 1
def test_get_vector(langchain_instance):
    """get() returns the matching doc as id/payload, or None when absent."""
    # Mock get result
    mock_doc = Mock(metadata={"name": "vector1"}, id="id1")
    langchain_instance.client.get_by_ids.return_value = [mock_doc]
    # Test get existing vector
    result = langchain_instance.get("id1")
    langchain_instance.client.get_by_ids.assert_called_once_with(["id1"])
    assert result is not None
    assert result.id == "id1"
    assert result.payload == {"name": "vector1"}
    # Test get non-existent vector
    langchain_instance.client.get_by_ids.return_value = []
    result = langchain_instance.get("non_existent_id")
    assert result is None
def test_list_with_filters(langchain_instance):
    """Test list with agent_id and run_id filters."""
    # Mock the _collection.get method
    # The stub mimics a nested-list response (ids/metadatas wrapped one level
    # deep); list() preserves that nesting, hence results[0][0] below.
    mock_collection = Mock()
    mock_collection.get.return_value = {
        "ids": [["id1"]],
        "metadatas": [[{"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}]],
        "documents": [["test document"]]
    }
    langchain_instance.client._collection = mock_collection
    filters = {"user_id": "alice", "agent_id": "agent1", "run_id": "run1"}
    results = langchain_instance.list(filters=filters, limit=10)
    # Verify that the collection.get method was called with the correct filters
    mock_collection.get.assert_called_once_with(where=filters, limit=10)
    # Verify the results
    assert len(results) == 1
    assert len(results[0]) == 1
    assert results[0][0].payload["user_id"] == "alice"
    assert results[0][0].payload["agent_id"] == "agent1"
    assert results[0][0].payload["run_id"] == "run1"
def test_list_with_single_filter(langchain_instance):
    """Test list with single filter."""
    # Mock the _collection.get method
    mock_collection = Mock()
    mock_collection.get.return_value = {
        "ids": [["id1"]],
        "metadatas": [[{"user_id": "alice"}]],
        "documents": [["test document"]]
    }
    langchain_instance.client._collection = mock_collection
    filters = {"user_id": "alice"}
    results = langchain_instance.list(filters=filters, limit=10)
    # Verify that the collection.get method was called with the correct filter
    # A single filter is forwarded unchanged as the `where` clause.
    mock_collection.get.assert_called_once_with(where=filters, limit=10)
    # Verify the results
    assert len(results) == 1
    assert len(results[0]) == 1
    assert results[0][0].payload["user_id"] == "alice"
def test_list_with_no_filters(langchain_instance):
    """Test list with no filters."""
    # Mock the _collection.get method
    mock_collection = Mock()
    mock_collection.get.return_value = {
        "ids": [["id1"]],
        "metadatas": [[{"name": "vector1"}]],
        "documents": [["test document"]]
    }
    langchain_instance.client._collection = mock_collection
    results = langchain_instance.list(filters=None, limit=10)
    # Verify that the collection.get method was called with no filters
    # filters=None is passed through as where=None (not omitted).
    mock_collection.get.assert_called_once_with(where=None, limit=10)
    # Verify the results
    assert len(results) == 1
    assert len(results[0]) == 1
    assert results[0][0].payload["name"] == "vector1"
def test_list_with_exception(langchain_instance):
    """Test list when an exception occurs."""
    # Mock the _collection.get method to raise an exception
    mock_collection = Mock()
    mock_collection.get.side_effect = Exception("Test exception")
    langchain_instance.client._collection = mock_collection
    results = langchain_instance.list(filters={"user_id": "alice"}, limit=10)
    # Verify that an empty list is returned when an exception occurs
    # Errors from the underlying collection are swallowed; list() degrades to [].
    assert results == []
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_upstash_vector.py | tests/vector_stores/test_upstash_vector.py | from dataclasses import dataclass
from typing import Dict, List, Optional
from unittest.mock import MagicMock, call, patch
import pytest
from mem0.vector_stores.upstash_vector import UpstashVector
@dataclass
class QueryResult:
    """Lightweight stand-in for the upstash_vector client's query/fetch results."""
    id: str
    score: Optional[float]
    vector: Optional[List[float]] = None
    metadata: Optional[Dict] = None
    data: Optional[str] = None
@pytest.fixture
def mock_index():
    """Patch upstash_vector.Index so no network client is constructed."""
    with patch("upstash_vector.Index") as mock_index:
        yield mock_index
@pytest.fixture
def upstash_instance(mock_index):
    """Store under test, bound to namespace "ns" with embeddings disabled."""
    return UpstashVector(client=mock_index.return_value, collection_name="ns")
@pytest.fixture
def upstash_instance_with_embeddings(mock_index):
    """Same store but constructed with enable_embeddings=True."""
    return UpstashVector(client=mock_index.return_value, collection_name="ns", enable_embeddings=True)
def test_insert_vectors(upstash_instance, mock_index):
    """insert() upserts dict-shaped vector records into the configured namespace."""
    upstash_instance.insert(
        vectors=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
        payloads=[{"name": "vector1"}, {"name": "vector2"}],
        ids=["id1", "id2"],
    )
    upstash_instance.client.upsert.assert_called_once_with(
        vectors=[
            {"id": "id1", "vector": [0.1, 0.2, 0.3], "metadata": {"name": "vector1"}},
            {"id": "id2", "vector": [0.4, 0.5, 0.6], "metadata": {"name": "vector2"}},
        ],
        namespace="ns",
    )
def test_search_vectors(upstash_instance, mock_index):
    """search() batches through query_many, rendering filters as a string expression."""
    mock_result = [
        QueryResult(id="id1", score=0.1, vector=None, metadata={"name": "vector1"}, data=None),
        QueryResult(id="id2", score=0.2, vector=None, metadata={"name": "vector2"}, data=None),
    ]
    upstash_instance.client.query_many.return_value = [mock_result]
    vectors = [[0.1, 0.2, 0.3]]
    results = upstash_instance.search(
        query="hello world",
        vectors=vectors,
        limit=2,
        filters={"age": 30, "name": "John"},
    )
    # Dict filters become an Upstash filter string (ints unquoted, strings quoted).
    upstash_instance.client.query_many.assert_called_once_with(
        queries=[
            {
                "vector": vectors[0],
                "top_k": 2,
                "namespace": "ns",
                "include_metadata": True,
                "filter": 'age = 30 AND name = "John"',
            }
        ]
    )
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].score == 0.1
    assert results[0].payload == {"name": "vector1"}
def test_delete_vector(upstash_instance):
    """delete() forwards the id list and namespace to the Upstash client."""
    upstash_instance.delete(vector_id="id1")
    upstash_instance.client.delete.assert_called_once_with(ids=["id1"], namespace="ns")
def test_update_vector(upstash_instance):
    """update() forwards vector and metadata; `data` stays None without embeddings."""
    vector_id = "id1"
    new_vector = [0.7, 0.8, 0.9]
    new_payload = {"name": "updated_vector"}
    upstash_instance.update(vector_id=vector_id, vector=new_vector, payload=new_payload)
    upstash_instance.client.update.assert_called_once_with(
        id="id1",
        vector=new_vector,
        data=None,
        metadata={"name": "updated_vector"},
        namespace="ns",
    )
def test_get_vector(upstash_instance):
    """get() fetches a single id (with metadata) and exposes it as id/payload."""
    upstash_instance.client.fetch.return_value = [
        QueryResult(id="id1", score=None, vector=None, metadata={"name": "vector1"}, data=None)
    ]
    fetched = upstash_instance.get(vector_id="id1")
    upstash_instance.client.fetch.assert_called_once_with(ids=["id1"], namespace="ns", include_metadata=True)
    assert (fetched.id, fetched.payload) == ("id1", {"name": "vector1"})
def test_list_vectors(upstash_instance):
    """list() pages through a resumable query until an empty batch is returned.

    The first page comes from resumable_query itself; subsequent pages come
    from handler.fetch_next(100) until it yields an empty list, after which
    the handler context is exited.
    """
    mock_result = [
        QueryResult(id="id1", score=None, vector=None, metadata={"name": "vector1"}, data=None),
        QueryResult(id="id2", score=None, vector=None, metadata={"name": "vector2"}, data=None),
        QueryResult(id="id3", score=None, vector=None, metadata={"name": "vector3"}, data=None),
    ]
    handler = MagicMock()
    # list() reads the index dimension to build its all-ones probe vector.
    upstash_instance.client.info.return_value.dimension = 10
    upstash_instance.client.resumable_query.return_value = (mock_result[0:1], handler)
    handler.fetch_next.side_effect = [mock_result[1:2], mock_result[2:3], []]
    # NOTE: removed a leftover debug print and a dead reassignment of
    # client.info.return_value that ran after list() had already executed.
    [results] = upstash_instance.list(filters={"age": 30, "name": "John"}, limit=15)
    upstash_instance.client.resumable_query.assert_called_once_with(
        vector=[1.0] * 10,
        filter='age = 30 AND name = "John"',
        include_metadata=True,
        namespace="ns",
        top_k=100,
    )
    handler.fetch_next.assert_has_calls([call(100), call(100), call(100)])
    handler.__exit__.assert_called_once()
    assert len(results) == len(mock_result)
    assert results[0].id == "id1"
    assert results[0].payload == {"name": "vector1"}
def test_insert_vectors_with_embeddings(upstash_instance_with_embeddings, mock_index):
    """With embeddings enabled, insert() sends payload['data'] instead of raw vectors."""
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [
        {"name": "vector1", "data": "data1"},
        {"name": "vector2", "data": "data2"},
    ]
    ids = ["id1", "id2"]
    upstash_instance_with_embeddings.insert(vectors=vectors, payloads=payloads, ids=ids)
    upstash_instance_with_embeddings.client.upsert.assert_called_once_with(
        vectors=[
            {
                "id": "id1",
                # Uses the data field instead of using vectors
                "data": "data1",
                "metadata": {"name": "vector1", "data": "data1"},
            },
            {
                "id": "id2",
                "data": "data2",
                "metadata": {"name": "vector2", "data": "data2"},
            },
        ],
        namespace="ns",
    )
def test_search_vectors_with_embeddings(upstash_instance_with_embeddings, mock_index):
    """With embeddings enabled, search() queries by raw text via the `data` field."""
    mock_result = [
        QueryResult(id="id1", score=0.1, vector=None, metadata={"name": "vector1"}, data="data1"),
        QueryResult(id="id2", score=0.2, vector=None, metadata={"name": "vector2"}, data="data2"),
    ]
    upstash_instance_with_embeddings.client.query.return_value = mock_result
    results = upstash_instance_with_embeddings.search(
        query="hello world",
        vectors=[],
        limit=2,
        filters={"age": 30, "name": "John"},
    )
    upstash_instance_with_embeddings.client.query.assert_called_once_with(
        # Uses the data field instead of using vectors
        data="hello world",
        top_k=2,
        filter='age = 30 AND name = "John"',
        include_metadata=True,
        namespace="ns",
    )
    assert len(results) == 2
    assert results[0].id == "id1"
    assert results[0].score == 0.1
    assert results[0].payload == {"name": "vector1"}
def test_update_vector_with_embeddings(upstash_instance_with_embeddings):
    """With embeddings enabled, update() extracts `data` from the payload."""
    vector_id = "id1"
    new_payload = {"name": "updated_vector", "data": "updated_data"}
    upstash_instance_with_embeddings.update(vector_id=vector_id, payload=new_payload)
    upstash_instance_with_embeddings.client.update.assert_called_once_with(
        id="id1",
        vector=None,
        data="updated_data",
        metadata={"name": "updated_vector", "data": "updated_data"},
        namespace="ns",
    )
def test_insert_vectors_with_embeddings_missing_data(upstash_instance_with_embeddings):
    """insert() rejects payloads lacking a 'data' field when embeddings are on."""
    vectors = [[0.1, 0.2, 0.3]]
    payloads = [{"name": "vector1"}]  # Missing data field
    ids = ["id1"]
    with pytest.raises(
        ValueError,
        match="When embeddings are enabled, all payloads must contain a 'data' field",
    ):
        upstash_instance_with_embeddings.insert(vectors=vectors, payloads=payloads, ids=ids)
def test_update_vector_with_embeddings_missing_data(upstash_instance_with_embeddings):
    """update() tolerates a missing 'data' field; data simply stays None."""
    # Should still work, data is not required for update
    vector_id = "id1"
    new_payload = {"name": "updated_vector"}  # Missing data field
    upstash_instance_with_embeddings.update(vector_id=vector_id, payload=new_payload)
    upstash_instance_with_embeddings.client.update.assert_called_once_with(
        id="id1",
        vector=None,
        data=None,
        metadata={"name": "updated_vector"},
        namespace="ns",
    )
def test_list_cols(upstash_instance):
    """list_cols() maps to the client's namespace listing."""
    upstash_instance.client.list_namespaces.return_value = ["ns1", "ns2", "ns3"]
    assert upstash_instance.list_cols() == ["ns1", "ns2", "ns3"]
    upstash_instance.client.list_namespaces.assert_called_once()
def test_delete_col(upstash_instance):
    """delete_col() issues a reset for the store's namespace."""
    upstash_instance.delete_col()
    upstash_instance.client.reset.assert_called_once_with(namespace="ns")
def test_col_info(upstash_instance):
    """col_info() returns the client's info payload unchanged."""
    info = {
        "dimension": 10,
        "total_vectors": 100,
        "pending_vectors": 0,
        "disk_size": 1024,
    }
    upstash_instance.client.info.return_value = info
    assert upstash_instance.col_info() == info
    upstash_instance.client.info.assert_called_once()
def test_get_vector_not_found(upstash_instance):
    """get() returns None when the fetch yields no records."""
    upstash_instance.client.fetch.return_value = []
    assert upstash_instance.get(vector_id="nonexistent") is None
    upstash_instance.client.fetch.assert_called_once_with(
        ids=["nonexistent"], namespace="ns", include_metadata=True
    )
def test_search_vectors_empty_filters(upstash_instance):
    """search() with filters=None renders an empty filter string."""
    mock_result = [QueryResult(id="id1", score=0.1, vector=None, metadata={"name": "vector1"}, data=None)]
    upstash_instance.client.query_many.return_value = [mock_result]
    vectors = [[0.1, 0.2, 0.3]]
    results = upstash_instance.search(
        query="hello world",
        vectors=vectors,
        limit=1,
        filters=None,
    )
    upstash_instance.client.query_many.assert_called_once_with(
        queries=[
            {
                "vector": vectors[0],
                "top_k": 1,
                "namespace": "ns",
                "include_metadata": True,
                "filter": "",
            }
        ]
    )
    assert len(results) == 1
    assert results[0].id == "id1"
def test_insert_vectors_no_payloads(upstash_instance):
    """insert() without payloads sends metadata=None for each record."""
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    ids = ["id1", "id2"]
    upstash_instance.insert(vectors=vectors, ids=ids)
    upstash_instance.client.upsert.assert_called_once_with(
        vectors=[
            {"id": "id1", "vector": [0.1, 0.2, 0.3], "metadata": None},
            {"id": "id2", "vector": [0.4, 0.5, 0.6], "metadata": None},
        ],
        namespace="ns",
    )
def test_insert_vectors_no_ids(upstash_instance):
    """insert() without ids sends id=None for each record."""
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]
    upstash_instance.insert(vectors=vectors, payloads=payloads)
    upstash_instance.client.upsert.assert_called_once_with(
        vectors=[
            {"id": None, "vector": [0.1, 0.2, 0.3], "metadata": {"name": "vector1"}},
            {"id": None, "vector": [0.4, 0.5, 0.6], "metadata": {"name": "vector2"}},
        ],
        namespace="ns",
    )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_elasticsearch.py | tests/vector_stores/test_elasticsearch.py | import os
import unittest
from unittest.mock import MagicMock, Mock, patch
import dotenv
try:
from elasticsearch import Elasticsearch
except ImportError:
raise ImportError("Elasticsearch requires extra dependencies. Install with `pip install elasticsearch`") from None
from mem0.vector_stores.elasticsearch import ElasticsearchDB, OutputData
from mem0.configs.vector_stores.elasticsearch import ElasticsearchConfig
class TestElasticsearchDB(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load .env, snapshot the ES_* environment, then install test values."""
        # Load environment variables before any test
        dotenv.load_dotenv()
        # Save original environment variables
        cls.original_env = {
            "ES_URL": os.getenv("ES_URL", "http://localhost:9200"),
            "ES_USERNAME": os.getenv("ES_USERNAME", "test_user"),
            "ES_PASSWORD": os.getenv("ES_PASSWORD", "test_password"),
            "ES_CLOUD_ID": os.getenv("ES_CLOUD_ID", "test_cloud_id"),
        }
        # Set test environment variables
        os.environ["ES_URL"] = "http://localhost"
        os.environ["ES_USERNAME"] = "test_user"
        os.environ["ES_PASSWORD"] = "test_password"
    def setUp(self):
        """Patch the Elasticsearch class and build an ElasticsearchDB under test."""
        # Create a mock Elasticsearch client with proper attributes
        self.client_mock = MagicMock(spec=Elasticsearch)
        self.client_mock.indices = MagicMock()
        self.client_mock.indices.exists = MagicMock(return_value=False)
        self.client_mock.indices.create = MagicMock()
        self.client_mock.indices.delete = MagicMock()
        self.client_mock.indices.get_alias = MagicMock()
        # Start patches BEFORE creating ElasticsearchDB instance
        patcher = patch("mem0.vector_stores.elasticsearch.Elasticsearch", return_value=self.client_mock)
        self.mock_es = patcher.start()
        self.addCleanup(patcher.stop)
        # Initialize ElasticsearchDB with test config and auto_create_index=False
        self.es_db = ElasticsearchDB(
            host=os.getenv("ES_URL"),
            port=9200,
            collection_name="test_collection",
            embedding_model_dims=1536,
            user=os.getenv("ES_USERNAME"),
            password=os.getenv("ES_PASSWORD"),
            verify_certs=False,
            use_ssl=False,
            auto_create_index=False,  # Disable auto creation for tests
        )
        # Reset mock counts after initialization
        self.client_mock.reset_mock()
    @classmethod
    def tearDownClass(cls):
        """Put the ES_* environment back the way setUpClass found it."""
        # Restore original environment variables
        for key, value in cls.original_env.items():
            if value is not None:
                os.environ[key] = value
            else:
                os.environ.pop(key, None)
    def tearDown(self):
        """Reset recorded mock calls between tests."""
        self.client_mock.reset_mock()
        # No need to stop patches here as we're using addCleanup
    def test_create_index(self):
        """create_index() builds the dense_vector mapping, and is a no-op if the index exists."""
        # Test when index doesn't exist
        self.client_mock.indices.exists.return_value = False
        self.es_db.create_index()
        # Verify index creation was called with correct settings
        self.client_mock.indices.create.assert_called_once()
        create_args = self.client_mock.indices.create.call_args[1]
        # Verify basic index settings
        self.assertEqual(create_args["index"], "test_collection")
        self.assertIn("mappings", create_args["body"])
        # Verify field mappings
        mappings = create_args["body"]["mappings"]["properties"]
        self.assertEqual(mappings["text"]["type"], "text")
        self.assertEqual(mappings["vector"]["type"], "dense_vector")
        self.assertEqual(mappings["vector"]["dims"], 1536)
        self.assertEqual(mappings["vector"]["index"], True)
        self.assertEqual(mappings["vector"]["similarity"], "cosine")
        self.assertEqual(mappings["metadata"]["type"], "object")
        # Reset mocks for next test
        self.client_mock.reset_mock()
        # Test when index already exists
        self.client_mock.indices.exists.return_value = True
        self.es_db.create_index()
        # Verify create was not called when index exists
        self.client_mock.indices.create.assert_not_called()
    def test_auto_create_index(self):
        """auto_create_index controls whether __init__ probes/creates the index."""
        # Reset mock
        self.client_mock.reset_mock()
        # Test with auto_create_index=True
        ElasticsearchDB(
            host=os.getenv("ES_URL"),
            port=9200,
            collection_name="test_collection",
            embedding_model_dims=1536,
            user=os.getenv("ES_USERNAME"),
            password=os.getenv("ES_PASSWORD"),
            verify_certs=False,
            use_ssl=False,
            auto_create_index=True,
        )
        # Verify create_index was called during initialization
        self.client_mock.indices.exists.assert_called_once()
        # Reset mock
        self.client_mock.reset_mock()
        # Test with auto_create_index=False
        ElasticsearchDB(
            host=os.getenv("ES_URL"),
            port=9200,
            collection_name="test_collection",
            embedding_model_dims=1536,
            user=os.getenv("ES_USERNAME"),
            password=os.getenv("ES_PASSWORD"),
            verify_certs=False,
            use_ssl=False,
            auto_create_index=False,
        )
        # Verify create_index was not called during initialization
        self.client_mock.indices.exists.assert_not_called()
    def test_insert(self):
        """insert() bulk-indexes documents and returns OutputData per input."""
        # Test data
        vectors = [[0.1] * 1536, [0.2] * 1536]
        payloads = [{"key1": "value1"}, {"key2": "value2"}]
        ids = ["id1", "id2"]
        # Mock bulk operation
        with patch("mem0.vector_stores.elasticsearch.bulk") as mock_bulk:
            mock_bulk.return_value = (2, [])  # Simulate successful bulk insert
            # Perform insert
            results = self.es_db.insert(vectors=vectors, payloads=payloads, ids=ids)
            # Verify bulk was called
            mock_bulk.assert_called_once()
            # Verify bulk actions format
            actions = mock_bulk.call_args[0][1]
            self.assertEqual(len(actions), 2)
            self.assertEqual(actions[0]["_index"], "test_collection")
            self.assertEqual(actions[0]["_id"], "id1")
            self.assertEqual(actions[0]["_source"]["vector"], vectors[0])
            self.assertEqual(actions[0]["_source"]["metadata"], payloads[0])
            # Verify returned objects
            self.assertEqual(len(results), 2)
            self.assertIsInstance(results[0], OutputData)
            self.assertEqual(results[0].id, "id1")
            self.assertEqual(results[0].payload, payloads[0])
def test_search(self):
# Mock search response
mock_response = {
"hits": {
"hits": [
{"_id": "id1", "_score": 0.8, "_source": {"vector": [0.1] * 1536, "metadata": {"key1": "value1"}}}
]
}
}
self.client_mock.search.return_value = mock_response
# Perform search
vectors = [[0.1] * 1536]
results = self.es_db.search(query="", vectors=vectors, limit=5)
# Verify search call
self.client_mock.search.assert_called_once()
search_args = self.client_mock.search.call_args[1]
# Verify search parameters
self.assertEqual(search_args["index"], "test_collection")
body = search_args["body"]
# Verify KNN query structure
self.assertIn("knn", body)
self.assertEqual(body["knn"]["field"], "vector")
self.assertEqual(body["knn"]["query_vector"], vectors)
self.assertEqual(body["knn"]["k"], 5)
self.assertEqual(body["knn"]["num_candidates"], 10)
# Verify results
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, "id1")
self.assertEqual(results[0].score, 0.8)
self.assertEqual(results[0].payload, {"key1": "value1"})
def test_custom_search_query(self):
# Mock custom search query
self.es_db.custom_search_query = Mock()
self.es_db.custom_search_query.return_value = {"custom_key": "custom_value"}
# Perform search
vectors = [[0.1] * 1536]
limit = 5
filters = {"key1": "value1"}
self.es_db.search(query="", vectors=vectors, limit=limit, filters=filters)
# Verify custom search query function was called
self.es_db.custom_search_query.assert_called_once_with(vectors, limit, filters)
# Verify custom search query was used
self.client_mock.search.assert_called_once_with(
index=self.es_db.collection_name, body={"custom_key": "custom_value"}
)
def test_get(self):
# Mock get response with correct structure
mock_response = {
"_id": "id1",
"_source": {"vector": [0.1] * 1536, "metadata": {"key": "value"}, "text": "sample text"},
}
self.client_mock.get.return_value = mock_response
# Perform get
result = self.es_db.get(vector_id="id1")
# Verify get call
self.client_mock.get.assert_called_once_with(index="test_collection", id="id1")
# Verify result
self.assertIsNotNone(result)
self.assertEqual(result.id, "id1")
self.assertEqual(result.score, 1.0)
self.assertEqual(result.payload, {"key": "value"})
def test_get_not_found(self):
# Mock get raising exception
self.client_mock.get.side_effect = Exception("Not found")
# Verify get returns None when document not found
result = self.es_db.get(vector_id="nonexistent")
self.assertIsNone(result)
def test_list(self):
# Mock search response with scores
mock_response = {
"hits": {
"hits": [
{"_id": "id1", "_source": {"vector": [0.1] * 1536, "metadata": {"key1": "value1"}}, "_score": 1.0},
{"_id": "id2", "_source": {"vector": [0.2] * 1536, "metadata": {"key2": "value2"}}, "_score": 0.8},
]
}
}
self.client_mock.search.return_value = mock_response
# Perform list operation
results = self.es_db.list(limit=10)
# Verify search call
self.client_mock.search.assert_called_once()
# Verify results
self.assertEqual(len(results), 1) # Outer list
self.assertEqual(len(results[0]), 2) # Inner list
self.assertIsInstance(results[0][0], OutputData)
self.assertEqual(results[0][0].id, "id1")
self.assertEqual(results[0][0].payload, {"key1": "value1"})
self.assertEqual(results[0][1].id, "id2")
self.assertEqual(results[0][1].payload, {"key2": "value2"})
def test_delete(self):
# Perform delete
self.es_db.delete(vector_id="id1")
# Verify delete call
self.client_mock.delete.assert_called_once_with(index="test_collection", id="id1")
def test_list_cols(self):
# Mock indices response
mock_indices = {"index1": {}, "index2": {}}
self.client_mock.indices.get_alias.return_value = mock_indices
# Get collections
result = self.es_db.list_cols()
# Verify result
self.assertEqual(result, ["index1", "index2"])
def test_delete_col(self):
# Delete collection
self.es_db.delete_col()
# Verify delete call
self.client_mock.indices.delete.assert_called_once_with(index="test_collection")
def test_es_config(self):
config = {"host": "localhost", "port": 9200, "user": "elastic", "password": "password"}
es_config = ElasticsearchConfig(**config)
# Assert that the config object was created successfully
self.assertIsNotNone(es_config)
self.assertIsInstance(es_config, ElasticsearchConfig)
# Assert that the configuration values are correctly set
self.assertEqual(es_config.host, "localhost")
self.assertEqual(es_config.port, 9200)
self.assertEqual(es_config.user, "elastic")
self.assertEqual(es_config.password, "password")
def test_es_valid_headers(self):
config = {
"host": "localhost",
"port": 9200,
"user": "elastic",
"password": "password",
"headers": {"x-extra-info": "my-mem0-instance"},
}
es_config = ElasticsearchConfig(**config)
self.assertIsNotNone(es_config.headers)
self.assertEqual(len(es_config.headers), 1)
self.assertEqual(es_config.headers["x-extra-info"], "my-mem0-instance")
def test_es_invalid_headers(self):
base_config = {
"host": "localhost",
"port": 9200,
"user": "elastic",
"password": "password",
}
invalid_headers = [
"not-a-dict", # Non-dict headers
{"x-extra-info": 123}, # Non-string values
{123: "456"}, # Non-string keys
]
for headers in invalid_headers:
with self.assertRaises(ValueError):
config = {**base_config, "headers": headers}
ElasticsearchConfig(**config)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_pgvector.py | tests/vector_stores/test_pgvector.py | import importlib
import sys
import unittest
import uuid
from unittest.mock import MagicMock, patch
from mem0.vector_stores.pgvector import PGVector
class TestPGVector(unittest.TestCase):
def setUp(self):
"""Set up test fixtures."""
self.mock_conn = MagicMock()
self.mock_cursor = MagicMock()
self.mock_conn.cursor.return_value = self.mock_cursor
# Mock connection pool
self.mock_pool_psycopg2 = MagicMock()
self.mock_pool_psycopg2.getconn.return_value = self.mock_conn
self.mock_pool_psycopg = MagicMock()
self.mock_pool_psycopg.connection.return_value = self.mock_conn
self.mock_get_cursor = MagicMock()
self.mock_get_cursor.return_value = self.mock_cursor
# Mock connection string
self.connection_string = "postgresql://user:pass@host:5432/db"
# Test data
self.test_vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
self.test_payloads = [{"key": "value1"}, {"key": "value2"}]
self.test_ids = [str(uuid.uuid4()), str(uuid.uuid4())]
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_init_with_individual_params_psycopg3(self, mock_psycopg_pool):
"""Test initialization with individual parameters using psycopg3."""
# Mock psycopg3 to be available
mock_psycopg_pool.return_value = self.mock_pool_psycopg
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4,
)
mock_psycopg_pool.assert_called_once_with(
conninfo="postgresql://test_user:test_pass@localhost:5432/test_db",
min_size=1,
max_size=4,
open=True,
)
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
def test_init_with_individual_params_psycopg2(self, mock_pcycopg2_pool):
"""Test initialization with individual parameters using psycopg2."""
mock_pcycopg2_pool.return_value = self.mock_pool_psycopg2
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4,
)
mock_pcycopg2_pool.assert_called_once_with(
minconn=1,
maxconn=4,
dsn="postgresql://test_user:test_pass@localhost:5432/test_db",
)
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test collection creation with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify vector extension and table creation
self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
self.assertTrue(len(table_creation_calls) > 0)
# Verify pgvector instance properties
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg3_with_explicit_pool(self, mock_get_cursor, mock_connection_pool):
"""
Test collection creation with psycopg3 when an explicit psycopg_pool.ConnectionPool is provided.
This ensures that PGVector uses the provided pool and still performs collection creation logic.
"""
# Set up a real (mocked) psycopg_pool.ConnectionPool instance
explicit_pool = MagicMock(name="ExplicitPsycopgPool")
# The patch for ConnectionPool should not be used in this case, but we patch it for isolation
mock_connection_pool.return_value = MagicMock(name="ShouldNotBeUsed")
# Configure the _get_cursor mock to return our mock cursor as a context manager
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
# Simulate no existing collections in the database
self.mock_cursor.fetchall.return_value = []
# Pass the explicit pool to PGVector
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4,
connection_pool=explicit_pool
)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
mock_connection_pool.assert_not_called()
# Verify vector extension and table creation
self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
self.assertTrue(len(table_creation_calls) > 0)
# Verify pgvector instance properties
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
# Ensure the pool used is the explicit one
self.assertIs(pgvector.connection_pool, explicit_pool)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg2_with_explicit_pool(self, mock_get_cursor, mock_connection_pool):
"""
Test collection creation with psycopg2 when an explicit psycopg2 ThreadedConnectionPool is provided.
This ensures that PGVector uses the provided pool and still performs collection creation logic.
"""
# Set up a real (mocked) psycopg2 ThreadedConnectionPool instance
explicit_pool = MagicMock(name="ExplicitPsycopg2Pool")
# The patch for ConnectionPool should not be used in this case, but we patch it for isolation
mock_connection_pool.return_value = MagicMock(name="ShouldNotBeUsed")
# Configure the _get_cursor mock to return our mock cursor as a context manager
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
# Simulate no existing collections in the database
self.mock_cursor.fetchall.return_value = []
# Pass the explicit pool to PGVector
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4,
connection_pool=explicit_pool
)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
mock_connection_pool.assert_not_called()
# Verify vector extension and table creation
self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
self.assertTrue(len(table_creation_calls) > 0)
# Verify pgvector instance properties
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
# Ensure the pool used is the explicit one
self.assertIs(pgvector.connection_pool, explicit_pool)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_create_col_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test collection creation with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify vector extension and table creation
self.mock_cursor.execute.assert_any_call("CREATE EXTENSION IF NOT EXISTS vector")
table_creation_calls = [call for call in self.mock_cursor.execute.call_args_list
if "CREATE TABLE IF NOT EXISTS test_collection" in str(call)]
self.assertTrue(len(table_creation_calls) > 0)
# Verify pgvector instance properties
self.assertEqual(pgvector.collection_name, "test_collection")
self.assertEqual(pgvector.embedding_model_dims, 3)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_insert_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test vector insertion with psycopg3."""
# Set up mock pool and cursor
mock_connection_pool.return_value = self.mock_pool_psycopg
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
pgvector.insert(self.test_vectors, self.test_payloads, self.test_ids)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify insert query was executed (psycopg3 uses executemany)
insert_calls = [call for call in self.mock_cursor.executemany.call_args_list
if "INSERT INTO test_collection" in str(call)]
self.assertTrue(len(insert_calls) > 0)
# Verify data format
call_args = self.mock_cursor.executemany.call_args
data_arg = call_args[0][1]
self.assertEqual(len(data_arg), 2)
self.assertEqual(data_arg[0][0], self.test_ids[0])
self.assertEqual(data_arg[1][0], self.test_ids[1])
    @patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
    @patch('mem0.vector_stores.pgvector.ConnectionPool')
    @patch.object(PGVector, '_get_cursor')
    def test_insert_psycopg2(self, mock_get_cursor, mock_connection_pool):
        """
        Test vector insertion with psycopg2.
        This test ensures that PGVector.insert uses psycopg2.extras.execute_values for batch inserts
        and that the data passed to execute_values is correctly formatted.

        NOTE(review): the importlib.reload below swaps the module object in
        sys.modules while psycopg2 is mocked, but the ``PGVector`` name imported
        at the top of this file still points at the originally-loaded class, and
        the reload is never undone after the patch.dict block exits — later tests
        may see whatever state the reload left behind. TODO confirm intentional.
        """
        # --- Setup mocks for psycopg2 and its submodules ---
        mock_execute_values = MagicMock()
        mock_pool = MagicMock()
        # Mock psycopg2.extras with execute_values
        mock_psycopg2_extras = MagicMock()
        mock_psycopg2_extras.execute_values = mock_execute_values
        mock_psycopg2_pool = MagicMock()
        mock_psycopg2_pool.ThreadedConnectionPool = mock_pool
        # Mock psycopg2 root module
        mock_psycopg2 = MagicMock()
        mock_psycopg2.extras = mock_psycopg2_extras
        mock_psycopg2.pool = mock_psycopg2_pool
        # Patch sys.modules so that imports in PGVector use our mocks
        with patch.dict('sys.modules', {
            'psycopg': None,  # Ensure psycopg3 is not available
            'psycopg_pool': None,
            'psycopg.types.json': None,
            'psycopg2': mock_psycopg2,
            'psycopg2.extras': mock_psycopg2_extras,
            'psycopg2.pool': mock_psycopg2_pool
        }):
            # Force reload of PGVector to pick up the mocked modules
            if 'mem0.vector_stores.pgvector' in sys.modules:
                importlib.reload(sys.modules['mem0.vector_stores.pgvector'])
            mock_connection_pool.return_value = self.mock_pool_psycopg
            mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
            mock_get_cursor.return_value.__exit__.return_value = None
            self.mock_cursor.fetchall.return_value = []
            pgvector = PGVector(
                dbname="test_db",
                collection_name="test_collection",
                embedding_model_dims=3,
                user="test_user",
                password="test_pass",
                host="localhost",
                port=5432,
                diskann=False,
                hnsw=False,
                minconn=1,
                maxconn=4
            )
            pgvector.insert(self.test_vectors, self.test_payloads, self.test_ids)
            mock_get_cursor.assert_called()
            mock_execute_values.assert_called_once()
            # execute_values(cursor, sql, argslist): args[1] is the SQL template,
            # args[2] is the batch of row tuples.
            call_args = mock_execute_values.call_args
            self.assertIn("INSERT INTO test_collection", call_args[0][1])
            # The data argument should be a list of tuples, one per vector
            data_arg = call_args[0][2]
            self.assertEqual(len(data_arg), 2)
            self.assertEqual(data_arg[0][0], self.test_ids[0])
            self.assertEqual(data_arg[1][0], self.test_ids[1])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test search with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"key": "value1"}),
(self.test_ids[1], 0.2, {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 2)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[1].id, self.test_ids[1])
self.assertEqual(results[1].score, 0.2)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_search_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test search with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [
(self.test_ids[0], 0.1, {"key": "value1"}),
(self.test_ids[1], 0.2, {"key": "value2"}),
]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
results = pgvector.search("test query", [0.1, 0.2, 0.3], limit=2)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify search query was executed
search_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector <=" in str(call)]
self.assertTrue(len(search_calls) > 0)
# Verify results
self.assertEqual(len(results), 2)
self.assertEqual(results[0].id, self.test_ids[0])
self.assertEqual(results[0].score, 0.1)
self.assertEqual(results[1].id, self.test_ids[1])
self.assertEqual(results[1].score, 0.2)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test delete with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
pgvector.delete(self.test_ids[0])
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify delete query was executed
delete_calls = [call for call in self.mock_cursor.execute.call_args_list
if "DELETE FROM test_collection" in str(call)]
self.assertTrue(len(delete_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_delete_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test delete with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
pgvector.delete(self.test_ids[0])
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify delete query was executed
delete_calls = [call for call in self.mock_cursor.execute.call_args_list
if "DELETE FROM test_collection" in str(call)]
self.assertTrue(len(delete_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test update with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
updated_vector = [0.5, 0.6, 0.7]
updated_payload = {"updated": "value"}
pgvector.update(self.test_ids[0], vector=updated_vector, payload=updated_payload)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify update queries were executed
update_calls = [call for call in self.mock_cursor.execute.call_args_list
if "UPDATE test_collection" in str(call)]
self.assertTrue(len(update_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_update_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test update with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
updated_vector = [0.5, 0.6, 0.7]
updated_payload = {"updated": "value"}
pgvector.update(self.test_ids[0], vector=updated_vector, payload=updated_payload)
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify update queries were executed
update_calls = [call for call in self.mock_cursor.execute.call_args_list
if "UPDATE test_collection" in str(call)]
self.assertTrue(len(update_calls) > 0)
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_get_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test get with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
self.mock_cursor.fetchone.return_value = (self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"})
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
result = pgvector.get(self.test_ids[0])
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify get query was executed
get_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call)]
self.assertTrue(len(get_calls) > 0)
# Verify result
self.assertIsNotNone(result)
self.assertEqual(result.id, self.test_ids[0])
self.assertEqual(result.payload, {"key": "value1"})
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_get_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test get with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [] # No existing collections
self.mock_cursor.fetchone.return_value = (self.test_ids[0], [0.1, 0.2, 0.3], {"key": "value1"})
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
result = pgvector.get(self.test_ids[0])
# Verify the _get_cursor context manager was called
mock_get_cursor.assert_called()
# Verify get query was executed
get_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT id, vector, payload" in str(call)]
self.assertTrue(len(get_calls) > 0)
# Verify result
self.assertIsNotNone(result)
self.assertEqual(result.id, self.test_ids[0])
self.assertEqual(result.payload, {"key": "value1"})
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 3)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_cols_psycopg3(self, mock_get_cursor, mock_connection_pool):
"""Test list_cols with psycopg3."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [("test_collection",), ("other_table",)]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
diskann=False,
hnsw=False,
minconn=1,
maxconn=4
)
collections = pgvector.list_cols()
# Verify list_cols query was executed
list_calls = [call for call in self.mock_cursor.execute.call_args_list
if "SELECT table_name FROM information_schema.tables" in str(call)]
self.assertTrue(len(list_calls) > 0)
# Verify result
self.assertEqual(collections, ["test_collection", "other_table"])
@patch('mem0.vector_stores.pgvector.PSYCOPG_VERSION', 2)
@patch('mem0.vector_stores.pgvector.ConnectionPool')
@patch.object(PGVector, '_get_cursor')
def test_list_cols_psycopg2(self, mock_get_cursor, mock_connection_pool):
"""Test list_cols with psycopg2."""
# Set up mock pool and cursor
mock_pool = MagicMock()
mock_connection_pool.return_value = mock_pool
# Configure the _get_cursor mock to return our mock cursor
mock_get_cursor.return_value.__enter__.return_value = self.mock_cursor
mock_get_cursor.return_value.__exit__.return_value = None
self.mock_cursor.fetchall.return_value = [("test_collection",), ("other_table",)]
pgvector = PGVector(
dbname="test_db",
collection_name="test_collection",
embedding_model_dims=3,
user="test_user",
password="test_pass",
host="localhost",
port=5432,
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | true |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/vector_stores/test_baidu.py | tests/vector_stores/test_baidu.py | from unittest.mock import Mock, PropertyMock, patch
import pytest
from pymochow.exception import ServerError
from pymochow.model.enum import ServerErrCode, TableState
from pymochow.model.table import (
FloatVector,
Table,
VectorSearchConfig,
VectorTopkSearchRequest,
)
from mem0.vector_stores.baidu import BaiduDB
@pytest.fixture
def mock_mochow_client():
    """Patch pymochow.MochowClient for the duration of a test."""
    with patch("pymochow.MochowClient") as patched_client:
        yield patched_client
@pytest.fixture
def mock_configuration():
    """Patch pymochow.configuration.Configuration for the duration of a test."""
    with patch("pymochow.configuration.Configuration") as patched_config:
        yield patched_config
@pytest.fixture
def mock_bce_credentials():
    """Patch pymochow.auth.bce_credentials.BceCredentials for the duration of a test."""
    with patch("pymochow.auth.bce_credentials.BceCredentials") as patched_creds:
        yield patched_creds
@pytest.fixture
def mock_table():
    """Build a Mock Table whose read-only attributes mimic a real pymochow table."""
    table = Mock(spec=Table)
    # Expose the Table attributes as properties on the mock's (per-instance) type.
    attribute_values = {
        "database_name": "test_db",
        "table_name": "test_table",
        "schema": Mock(),
        "replication": 1,
        "partition": Mock(),
        "enable_dynamic_field": False,
        "description": "",
        "create_time": "",
        "state": TableState.NORMAL,
        "aliases": [],
    }
    for attr, value in attribute_values.items():
        setattr(type(table), attr, PropertyMock(return_value=value))
    return table
@pytest.fixture
def mochow_instance(mock_mochow_client, mock_configuration, mock_bce_credentials, mock_table):
    """Construct a BaiduDB wired to fully mocked client/database/table objects."""
    client = Mock()
    database = Mock()
    mock_mochow_client.return_value = client
    # Database level: nothing exists yet; creation/lookup both yield our mock.
    client.list_databases.return_value = []
    client.create_database.return_value = database
    client.database.return_value = database
    # Table level mirrors the database mocks; the table reports a NORMAL state.
    database.list_table.return_value = []
    database.create_table.return_value = mock_table
    database.describe_table.return_value = Mock(state=TableState.NORMAL)
    database.table.return_value = mock_table
    return BaiduDB(
        endpoint="http://localhost:8287",
        account="test_account",
        api_key="test_api_key",
        database_name="test_db",
        table_name="test_table",
        embedding_model_dims=128,
        metric_type="COSINE",
    )
def test_insert(mochow_instance, mock_mochow_client):
    """insert() should upsert one row per vector carrying id/vector/metadata."""
    ids = ["id1", "id2"]
    vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    payloads = [{"name": "vector1"}, {"name": "vector2"}]
    mochow_instance.insert(vectors=vectors, payloads=payloads, ids=ids)
    upsert = mochow_instance._table.upsert
    assert upsert.call_count == 2
    # Each upsert call carries a single row matching the corresponding inputs.
    for call, row_id, vector, payload in zip(upsert.call_args_list, ids, vectors, payloads):
        row = call[1]["rows"][0]
        assert row._data["id"] == row_id
        assert row._data["vector"] == vector
        assert row._data["metadata"] == payload
def test_search(mochow_instance, mock_mochow_client):
    """search() should issue a VectorTopkSearchRequest and map rows to results."""
    raw_rows = [
        {"row": {"id": "id1", "metadata": {"name": "vector1"}}, "score": 0.1},
        {"row": {"id": "id2", "metadata": {"name": "vector2"}}, "score": 0.2},
    ]
    mochow_instance._table.vector_search.return_value = Mock(rows=raw_rows)
    query_vector = [0.1, 0.2, 0.3]
    results = mochow_instance.search(query="test", vectors=query_vector, limit=2)
    # The request handed to the table must carry our vector, limit, and config.
    mochow_instance._table.vector_search.assert_called_once()
    args, kwargs = mochow_instance._table.vector_search.call_args
    request = args[0] if args else kwargs["request"]
    assert isinstance(request, VectorTopkSearchRequest)
    assert request._vector_field == "vector"
    assert isinstance(request._vector, FloatVector)
    assert request._vector._floats == query_vector
    assert request._limit == 2
    assert isinstance(request._config, VectorSearchConfig)
    assert request._config._ef == 200
    # Each raw row becomes one result, preserving order, score, and payload.
    assert len(results) == 2
    for result, raw in zip(results, raw_rows):
        assert result.id == raw["row"]["id"]
        assert result.score == raw["score"]
        assert result.payload == raw["row"]["metadata"]
def test_search_with_filters(mochow_instance, mock_mochow_client):
    """Filters should be rendered into a pymochow metadata filter expression."""
    mochow_instance._table.vector_search.return_value = Mock(rows=[])
    mochow_instance.search(
        query="test",
        vectors=[0.1, 0.2, 0.3],
        limit=2,
        filters={"user_id": "user123", "agent_id": "agent456"},
    )
    args, kwargs = mochow_instance._table.vector_search.call_args
    request = args[0] if args else kwargs["request"]
    expected = 'metadata["user_id"] = "user123" AND metadata["agent_id"] = "agent456"'
    assert request._filter == expected
def test_delete(mochow_instance, mock_mochow_client):
    """delete() should remove the row keyed by its primary id."""
    mochow_instance.delete(vector_id="id1")
    mochow_instance._table.delete.assert_called_once_with(primary_key={"id": "id1"})
def test_update(mochow_instance, mock_mochow_client):
    """update() should upsert one row carrying the new vector and payload."""
    updated_vector = [0.7, 0.8, 0.9]
    updated_payload = {"name": "updated_vector"}
    mochow_instance.update(vector_id="id1", vector=updated_vector, payload=updated_payload)
    mochow_instance._table.upsert.assert_called_once()
    args, kwargs = mochow_instance._table.upsert.call_args
    row = args[0] if args else kwargs["rows"][0]
    assert row._data["id"] == "id1"
    assert row._data["vector"] == updated_vector
    assert row._data["metadata"] == updated_payload
def test_get(mochow_instance, mock_mochow_client):
    """get() should query by primary key and wrap the row into a result."""
    mochow_instance._table.query.return_value = Mock(
        row={"id": "id1", "metadata": {"name": "vector1"}}
    )
    result = mochow_instance.get(vector_id="id1")
    mochow_instance._table.query.assert_called_once_with(
        primary_key={"id": "id1"}, projections=["id", "metadata"]
    )
    assert result.id == "id1"
    assert result.score is None
    assert result.payload == {"name": "vector1"}
def test_list(mochow_instance, mock_mochow_client):
    """list() should select rows with a limit and return them as one page."""
    mochow_instance._table.select.return_value = Mock(
        rows=[
            {"id": "id1", "metadata": {"name": "vector1"}},
            {"id": "id2", "metadata": {"name": "vector2"}},
        ]
    )
    results = mochow_instance.list(limit=2)
    mochow_instance._table.select.assert_called_once_with(
        filter=None, projections=["id", "metadata"], limit=2
    )
    page = results[0]
    assert len(page) == 2
    assert [item.id for item in page] == ["id1", "id2"]
def test_list_cols(mochow_instance, mock_mochow_client):
    """list_cols() should return the table names of the current database."""
    mochow_instance._database.list_table.return_value = [
        Mock(spec=Table, database_name="test_db", table_name="table1"),
        Mock(spec=Table, database_name="test_db", table_name="table2"),
    ]
    assert mochow_instance.list_cols() == ["table1", "table2"]
def test_delete_col_not_exists(mochow_instance, mock_mochow_client):
    """Dropping a table that does not exist must be swallowed, not raised."""
    # Use the proper ServerErrCode enum value for "table does not exist".
    mochow_instance._database.drop_table.side_effect = ServerError(
        "Table not exists", code=ServerErrCode.TABLE_NOT_EXIST
    )
    mochow_instance.delete_col()  # must not raise
def test_col_info(mochow_instance, mock_mochow_client):
    """col_info() should surface the table stats unchanged."""
    stats = {"table_name": "test_table", "fields": []}
    mochow_instance._table.stats.return_value = stats
    assert mochow_instance.col_info() == stats
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/configs/test_prompts.py | tests/configs/test_prompts.py | from mem0.configs import prompts
def test_get_update_memory_messages():
    """A custom prompt, when given, prefixes the message; otherwise the default does."""
    old_memories = [{"id": "1", "text": "old memory 1"}]
    new_facts = ["new fact"]
    custom_prompt = "custom prompt determining memory update"
    # Custom update-memory prompt supplied.
    message = prompts.get_update_memory_messages(old_memories, new_facts, custom_prompt)
    assert message.startswith(custom_prompt)
    # No custom prompt: fall back to the library default.
    message = prompts.get_update_memory_messages(old_memories, new_facts, None)
    assert message.startswith(prompts.DEFAULT_UPDATE_MEMORY_PROMPT)
def test_get_update_memory_messages_empty_memory():
    """Both None and [] for existing memory should yield the empty-memory text."""
    for empty_value in (None, []):
        message = prompts.get_update_memory_messages(empty_value, ["new fact"], None)
        assert "Current memory is empty" in message
def test_get_update_memory_messages_non_empty_memory():
    """Existing memories are embedded verbatim along with the non-empty preamble."""
    existing = [{"id": "1", "text": "existing memory"}]
    message = prompts.get_update_memory_messages(existing, ["new fact"], None)
    # The memory list is rendered via str() into the message body...
    assert str(existing) in message
    # ...alongside the non-empty-memory wording.
    assert "current content of my memory" in message
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_lm_studio_embeddings.py | tests/embeddings/test_lm_studio_embeddings.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.lmstudio import LMStudioEmbedding
@pytest.fixture
def mock_lm_studio_client():
    """Patch the LM Studio OpenAI client and preload one embedding response."""
    with patch("mem0.embeddings.lmstudio.OpenAI") as patched_openai:
        client = Mock()
        client.embeddings.create.return_value = Mock(
            data=[Mock(embedding=[0.1, 0.2, 0.3, 0.4, 0.5])]
        )
        patched_openai.return_value = client
        yield client
def test_embed_text(mock_lm_studio_client):
    """embed() should forward the text as a single-item batch to the model."""
    model_name = "nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
    embedder = LMStudioEmbedding(BaseEmbedderConfig(model=model_name, embedding_dims=512))
    embedding = embedder.embed("Sample text to embed.")
    mock_lm_studio_client.embeddings.create.assert_called_once_with(
        input=["Sample text to embed."], model=model_name
    )
    assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_ollama_embeddings.py | tests/embeddings/test_ollama_embeddings.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.ollama import OllamaEmbedding
@pytest.fixture
def mock_ollama_client():
    """Patch the ollama Client; the target model is already present locally."""
    with patch("mem0.embeddings.ollama.Client") as patched_client:
        client = Mock()
        client.list.return_value = {"models": [{"name": "nomic-embed-text"}]}
        patched_client.return_value = client
        yield client
def test_embed_text(mock_ollama_client):
    """embed() should call the client with the configured model and prompt."""
    embedder = OllamaEmbedding(BaseEmbedderConfig(model="nomic-embed-text", embedding_dims=512))
    mock_ollama_client.embeddings.return_value = {"embedding": [0.1, 0.2, 0.3, 0.4, 0.5]}
    embedding = embedder.embed("Sample text to embed.")
    mock_ollama_client.embeddings.assert_called_once_with(
        model="nomic-embed-text", prompt="Sample text to embed."
    )
    assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
def test_ensure_model_exists(mock_ollama_client):
    """The model is pulled only when absent from the local model list."""
    embedder = OllamaEmbedding(BaseEmbedderConfig(model="nomic-embed-text", embedding_dims=512))
    # Construction saw the model already installed (fixture), so no pull yet.
    mock_ollama_client.pull.assert_not_called()
    # Once the model disappears from the list, it must be pulled.
    mock_ollama_client.list.return_value = {"models": []}
    embedder._ensure_model_exists()
    mock_ollama_client.pull.assert_called_once_with("nomic-embed-text")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_vertexai_embeddings.py | tests/embeddings/test_vertexai_embeddings.py | from unittest.mock import Mock, patch
import pytest
from mem0.embeddings.vertexai import VertexAIEmbedding
@pytest.fixture
def mock_text_embedding_model():
    """Patch TextEmbeddingModel and yield the instance from_pretrained returns."""
    with patch("mem0.embeddings.vertexai.TextEmbeddingModel") as patched_model:
        instance = Mock()
        patched_model.from_pretrained.return_value = instance
        yield instance
@pytest.fixture
def mock_os_environ():
    """Replace os.environ inside the vertexai module with an empty dict."""
    with patch("mem0.embeddings.vertexai.os.environ", {}) as patched_environ:
        yield patched_environ
@pytest.fixture
def mock_config():
    """Patch BaseEmbedderConfig; instances carry a credentials-file path."""
    with patch("mem0.configs.embeddings.base.BaseEmbedderConfig") as patched_config:
        patched_config.return_value.vertex_credentials_json = "/path/to/credentials.json"
        yield patched_config
@pytest.fixture
def mock_embedding_types():
    """All task types the Vertex AI text-embedding API accepts for TextEmbeddingInput."""
    return [
        "SEMANTIC_SIMILARITY",
        "CLASSIFICATION",
        "CLUSTERING",
        "RETRIEVAL_DOCUMENT",
        "RETRIEVAL_QUERY",
        "QUESTION_ANSWERING",
        "FACT_VERIFICATION",
        "CODE_RETRIEVAL_QUERY",
    ]
@pytest.fixture
def mock_text_embedding_input():
    """Patch TextEmbeddingInput so call arguments can be inspected."""
    with patch("mem0.embeddings.vertexai.TextEmbeddingInput") as patched_input:
        yield patched_input
@patch("mem0.embeddings.vertexai.TextEmbeddingModel")
def test_embed_default_model(mock_text_embedding_model, mock_os_environ, mock_config, mock_text_embedding_input):
    """The default model is loaded and queried with the SEMANTIC_SIMILARITY task type."""
    mock_config.return_value.model = "text-embedding-004"
    mock_config.return_value.embedding_dims = 256
    embedder = VertexAIEmbedding(mock_config())
    loaded = mock_text_embedding_model.from_pretrained.return_value
    loaded.get_embeddings.return_value = [Mock(values=[0.1, 0.2, 0.3])]
    embedder.embed("Hello world")
    mock_text_embedding_input.assert_called_once_with(text="Hello world", task_type="SEMANTIC_SIMILARITY")
    mock_text_embedding_model.from_pretrained.assert_called_once_with("text-embedding-004")
    loaded.get_embeddings.assert_called_once_with(
        texts=[mock_text_embedding_input("Hello world")], output_dimensionality=256
    )
@patch("mem0.embeddings.vertexai.TextEmbeddingModel")
def test_embed_custom_model(mock_text_embedding_model, mock_os_environ, mock_config, mock_text_embedding_input):
    """A non-default model name is loaded and queried with its configured dims."""
    mock_config.return_value.model = "custom-embedding-model"
    mock_config.return_value.embedding_dims = 512
    embedder = VertexAIEmbedding(mock_config())
    loaded = mock_text_embedding_model.from_pretrained.return_value
    loaded.get_embeddings.return_value = [Mock(values=[0.4, 0.5, 0.6])]
    result = embedder.embed("Test embedding")
    mock_text_embedding_input.assert_called_once_with(text="Test embedding", task_type="SEMANTIC_SIMILARITY")
    mock_text_embedding_model.from_pretrained.assert_called_with("custom-embedding-model")
    loaded.get_embeddings.assert_called_once_with(
        texts=[mock_text_embedding_input("Test embedding")], output_dimensionality=512
    )
    assert result == [0.4, 0.5, 0.6]
@patch("mem0.embeddings.vertexai.TextEmbeddingModel")
def test_embed_with_memory_action(
    mock_text_embedding_model, mock_os_environ, mock_config, mock_embedding_types, mock_text_embedding_input
):
    """Each memory action (add/update/search) uses its configured task type."""
    mock_config.return_value.model = "text-embedding-004"
    mock_config.return_value.embedding_dims = 256
    for embedding_type in mock_embedding_types:
        # Point all three per-action task types at the same value for this pass.
        mock_config.return_value.memory_add_embedding_type = embedding_type
        mock_config.return_value.memory_update_embedding_type = embedding_type
        mock_config.return_value.memory_search_embedding_type = embedding_type
        config = mock_config()
        embedder = VertexAIEmbedding(config)
        mock_text_embedding_model.from_pretrained.assert_called_with("text-embedding-004")
        for memory_action in ["add", "update", "search"]:
            embedder.embed("Hello world", memory_action=memory_action)
            # assert_called_with inspects only the most recent call, so this
            # checks the task type used by the action executed just above.
            mock_text_embedding_input.assert_called_with(text="Hello world", task_type=embedding_type)
            mock_text_embedding_model.from_pretrained.return_value.get_embeddings.assert_called_with(
                texts=[mock_text_embedding_input("Hello world", embedding_type)], output_dimensionality=256
            )
@patch("mem0.embeddings.vertexai.os")
def test_credentials_from_environment(mock_os, mock_text_embedding_model, mock_config):
    """When credentials come from the environment, os.environ is never written.

    The config carries no vertex_credentials_json and os.getenv (mocked)
    reports GOOGLE_APPLICATION_CREDENTIALS as already set, so the embedder
    must not assign into os.environ.
    """
    # Bug fix: the path must be cleared on the *instance* the fixture returns,
    # not on the mocked class itself, otherwise the embedder still sees a path.
    mock_config.return_value.vertex_credentials_json = None
    mock_os.getenv.return_value = "/env/credentials.json"
    config = mock_config()
    VertexAIEmbedding(config)
    # Bug fix: item assignment on a mock is recorded under __setitem__, not
    # "setitem" -- the old assertion passed vacuously on an unused attribute.
    mock_os.environ.__setitem__.assert_not_called()
@patch("mem0.embeddings.vertexai.os")
def test_missing_credentials(mock_os, mock_text_embedding_model, mock_config):
    """With no config path and no environment variable, construction must fail."""
    mock_os.getenv.return_value = None
    mock_config.return_value.vertex_credentials_json = None
    with pytest.raises(ValueError, match="Google application credentials JSON is not provided"):
        VertexAIEmbedding(mock_config())
@patch("mem0.embeddings.vertexai.TextEmbeddingModel")
def test_embed_with_different_dimensions(mock_text_embedding_model, mock_os_environ, mock_config):
    """A 1024-dim configuration should surface a 1024-long embedding."""
    mock_config.return_value.embedding_dims = 1024
    embedder = VertexAIEmbedding(mock_config())
    expected = [0.1] * 1024
    mock_text_embedding_model.from_pretrained.return_value.get_embeddings.return_value = [Mock(values=expected)]
    assert embedder.embed("Large embedding test") == expected
@patch("mem0.embeddings.vertexai.TextEmbeddingModel")
def test_invalid_memory_action(mock_text_embedding_model, mock_config):
    """An unknown memory_action must raise ValueError."""
    mock_config.return_value.model = "text-embedding-004"
    mock_config.return_value.embedding_dims = 256
    embedder = VertexAIEmbedding(mock_config())
    with pytest.raises(ValueError):
        embedder.embed("Hello world", memory_action="invalid_action")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_gemini_emeddings.py | tests/embeddings/test_gemini_emeddings.py | from unittest.mock import ANY, patch
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.gemini import GoogleGenAIEmbedding
@pytest.fixture
def mock_genai():
with patch("mem0.embeddings.gemini.genai.Client") as mock_client_class:
mock_client = mock_client_class.return_value
mock_client.models.embed_content.return_value = None
yield mock_client.models.embed_content
@pytest.fixture
def config():
return BaseEmbedderConfig(api_key="dummy_api_key", model="test_model", embedding_dims=786)
def test_embed_query(mock_genai, config):
mock_embedding_response = type(
"Response", (), {"embeddings": [type("Embedding", (), {"values": [0.1, 0.2, 0.3, 0.4]})]}
)()
mock_genai.return_value = mock_embedding_response
embedder = GoogleGenAIEmbedding(config)
text = "Hello, world!"
embedding = embedder.embed(text)
assert embedding == [0.1, 0.2, 0.3, 0.4]
mock_genai.assert_called_once_with(model="test_model", contents="Hello, world!", config=ANY)
def test_embed_returns_empty_list_if_none(mock_genai, config):
mock_genai.return_value = type("Response", (), {"embeddings": [type("Embedding", (), {"values": []})]})()
embedder = GoogleGenAIEmbedding(config)
result = embedder.embed("test")
assert result == []
def test_embed_raises_on_error(mock_genai, config):
mock_genai.side_effect = RuntimeError("Embedding failed")
embedder = GoogleGenAIEmbedding(config)
with pytest.raises(RuntimeError, match="Embedding failed"):
embedder.embed("some input")
def test_config_initialization(config):
embedder = GoogleGenAIEmbedding(config)
assert embedder.config.api_key == "dummy_api_key"
assert embedder.config.model == "test_model"
assert embedder.config.embedding_dims == 786
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_openai_embeddings.py | tests/embeddings/test_openai_embeddings.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.openai import OpenAIEmbedding
@pytest.fixture
def mock_openai_client():
    """Patch the OpenAI client used by the embedder and yield the instance."""
    with patch("mem0.embeddings.openai.OpenAI") as patched_openai:
        client = Mock()
        patched_openai.return_value = client
        yield client
def test_embed_default_model(mock_openai_client):
    """With no overrides, text-embedding-3-small at 1536 dims is used."""
    embedder = OpenAIEmbedding(BaseEmbedderConfig())
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[0.1, 0.2, 0.3])]
    )
    result = embedder.embed("Hello world")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Hello world"], model="text-embedding-3-small", dimensions=1536
    )
    assert result == [0.1, 0.2, 0.3]
def test_embed_custom_model(mock_openai_client):
    """Custom model name and dimensionality are forwarded verbatim."""
    embedder = OpenAIEmbedding(BaseEmbedderConfig(model="text-embedding-2-medium", embedding_dims=1024))
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[0.4, 0.5, 0.6])]
    )
    result = embedder.embed("Test embedding")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Test embedding"], model="text-embedding-2-medium", dimensions=1024
    )
    assert result == [0.4, 0.5, 0.6]
def test_embed_removes_newlines(mock_openai_client):
    """Newlines in the input are flattened to spaces before the API call."""
    embedder = OpenAIEmbedding(BaseEmbedderConfig())
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[0.7, 0.8, 0.9])]
    )
    result = embedder.embed("Hello\nworld")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Hello world"], model="text-embedding-3-small", dimensions=1536
    )
    assert result == [0.7, 0.8, 0.9]
def test_embed_without_api_key_env_var(mock_openai_client):
    """An api_key supplied in the config is enough -- no env var required."""
    embedder = OpenAIEmbedding(BaseEmbedderConfig(api_key="test_key"))
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[1.0, 1.1, 1.2])]
    )
    result = embedder.embed("Testing API key")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Testing API key"], model="text-embedding-3-small", dimensions=1536
    )
    assert result == [1.0, 1.1, 1.2]
def test_embed_uses_environment_api_key(mock_openai_client, monkeypatch):
    """OPENAI_API_KEY from the environment is honoured when config has none."""
    monkeypatch.setenv("OPENAI_API_KEY", "env_key")
    embedder = OpenAIEmbedding(BaseEmbedderConfig())
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[1.3, 1.4, 1.5])]
    )
    result = embedder.embed("Environment key test")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Environment key test"], model="text-embedding-3-small", dimensions=1536
    )
    assert result == [1.3, 1.4, 1.5]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_fastembed_embeddings.py | tests/embeddings/test_fastembed_embeddings.py | from unittest.mock import Mock, patch
import pytest
import numpy as np
from mem0.configs.embeddings.base import BaseEmbedderConfig
try:
from mem0.embeddings.fastembed import FastEmbedEmbedding
except ImportError:
pytest.skip("fastembed not installed", allow_module_level=True)
@pytest.fixture
def mock_fastembed_client():
    """Patch fastembed.TextEmbedding and yield the mocked instance."""
    with patch("mem0.embeddings.fastembed.TextEmbedding") as patched:
        instance = Mock()
        patched.return_value = instance
        yield instance
def test_embed_with_jina_model(mock_fastembed_client):
    """embed() should feed the raw text to fastembed and return its vector."""
    config = BaseEmbedderConfig(model="jinaai/jina-embeddings-v2-base-en", embedding_dims=768)
    embedder = FastEmbedEmbedding(config)
    # fastembed yields embeddings lazily; mimic that with a one-item iterator.
    mock_fastembed_client.embed.return_value = iter([np.array([0.1, 0.2, 0.3, 0.4, 0.5])])
    result = embedder.embed("Sample text to embed.")
    mock_fastembed_client.embed.assert_called_once_with("Sample text to embed.")
    assert list(result) == [0.1, 0.2, 0.3, 0.4, 0.5]
def test_embed_removes_newlines(mock_fastembed_client):
    """Embedded text should have newlines collapsed to spaces first."""
    config = BaseEmbedderConfig(model="jinaai/jina-embeddings-v2-base-en", embedding_dims=768)
    embedder = FastEmbedEmbedding(config)
    mock_fastembed_client.embed.return_value = iter([np.array([0.7, 0.8, 0.9])])
    result = embedder.embed("Hello\nworld")
    mock_fastembed_client.embed.assert_called_once_with("Hello world")
    assert list(result) == [0.7, 0.8, 0.9]
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_huggingface_embeddings.py | tests/embeddings/test_huggingface_embeddings.py | from unittest.mock import Mock, patch
import numpy as np
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.huggingface import HuggingFaceEmbedding
@pytest.fixture
def mock_sentence_transformer():
    """Patch SentenceTransformer and yield the mocked model instance."""
    with patch("mem0.embeddings.huggingface.SentenceTransformer") as patched:
        model = Mock()
        patched.return_value = model
        yield model
def test_embed_default_model(mock_sentence_transformer):
    """embed() should encode via SentenceTransformer and return a plain list."""
    embedder = HuggingFaceEmbedding(BaseEmbedderConfig())
    mock_sentence_transformer.encode.return_value = np.array([0.1, 0.2, 0.3])
    result = embedder.embed("Hello world")
    mock_sentence_transformer.encode.assert_called_once_with("Hello world", convert_to_numpy=True)
    assert result == [0.1, 0.2, 0.3]
def test_embed_custom_model(mock_sentence_transformer):
    """A custom model name still encodes through the mocked transformer."""
    embedder = HuggingFaceEmbedding(BaseEmbedderConfig(model="paraphrase-MiniLM-L6-v2"))
    mock_sentence_transformer.encode.return_value = np.array([0.4, 0.5, 0.6])
    result = embedder.embed("Custom model test")
    mock_sentence_transformer.encode.assert_called_once_with("Custom model test", convert_to_numpy=True)
    assert result == [0.4, 0.5, 0.6]
def test_embed_with_model_kwargs(mock_sentence_transformer):
    """model_kwargs (e.g. device) must not change the encode() call shape."""
    embedder = HuggingFaceEmbedding(
        BaseEmbedderConfig(model="all-MiniLM-L6-v2", model_kwargs={"device": "cuda"})
    )
    mock_sentence_transformer.encode.return_value = np.array([0.7, 0.8, 0.9])
    result = embedder.embed("Test with device")
    mock_sentence_transformer.encode.assert_called_once_with("Test with device", convert_to_numpy=True)
    assert result == [0.7, 0.8, 0.9]
def test_embed_sets_embedding_dims(mock_sentence_transformer):
    """When dims are unset, they are read from the loaded model."""
    mock_sentence_transformer.get_sentence_embedding_dimension.return_value = 384
    embedder = HuggingFaceEmbedding(BaseEmbedderConfig())
    assert embedder.config.embedding_dims == 384
    mock_sentence_transformer.get_sentence_embedding_dimension.assert_called_once()
def test_embed_with_custom_embedding_dims(mock_sentence_transformer):
    """Explicit embedding_dims override the model-reported dimension."""
    embedder = HuggingFaceEmbedding(BaseEmbedderConfig(model="all-mpnet-base-v2", embedding_dims=768))
    mock_sentence_transformer.encode.return_value = np.array([1.0, 1.1, 1.2])
    result = embedder.embed("Custom embedding dims")
    mock_sentence_transformer.encode.assert_called_once_with("Custom embedding dims", convert_to_numpy=True)
    assert embedder.config.embedding_dims == 768
    assert result == [1.0, 1.1, 1.2]
def test_embed_with_huggingface_base_url():
    """A huggingface_base_url routes embedding through an OpenAI-compatible API."""
    config = BaseEmbedderConfig(
        huggingface_base_url="http://localhost:8080",
        model="my-custom-model",
        model_kwargs={"truncate": True},
    )
    with patch("mem0.embeddings.huggingface.OpenAI") as mock_openai:
        client = Mock()
        mock_openai.return_value = client
        # One embedding record inside the API response envelope.
        client.embeddings.create.return_value = Mock(
            data=[Mock(embedding=[0.1, 0.2, 0.3])]
        )
        embedder = HuggingFaceEmbedding(config)
        result = embedder.embed("Hello from custom endpoint")
        mock_openai.assert_called_once_with(base_url="http://localhost:8080")
        # model_kwargs are splatted into the create() call.
        client.embeddings.create.assert_called_once_with(
            input="Hello from custom endpoint",
            model="my-custom-model",
            truncate=True,
        )
        assert result == [0.1, 0.2, 0.3]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/tests/embeddings/test_azure_openai_embeddings.py | tests/embeddings/test_azure_openai_embeddings.py | from unittest.mock import Mock, patch
import pytest
from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.azure_openai import AzureOpenAIEmbedding
@pytest.fixture
def mock_openai_client():
    """Patch AzureOpenAI and yield the mocked client instance."""
    with patch("mem0.embeddings.azure_openai.AzureOpenAI") as patched:
        client = Mock()
        patched.return_value = client
        yield client
def test_embed_text(mock_openai_client):
    """embed() should batch the text and unwrap the embedding vector."""
    embedder = AzureOpenAIEmbedding(BaseEmbedderConfig(model="text-embedding-ada-002"))
    mock_openai_client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[0.1, 0.2, 0.3])]
    )
    embedding = embedder.embed("Hello, this is a test.")
    mock_openai_client.embeddings.create.assert_called_once_with(
        input=["Hello, this is a test."], model="text-embedding-ada-002"
    )
    assert embedding == [0.1, 0.2, 0.3]
@pytest.mark.parametrize(
    "default_headers, expected_header",
    [(None, None), ({"Test": "test_value"}, "test_value"), ({}, None)],
)
def test_embed_text_with_default_headers(default_headers, expected_header):
    """default_headers from azure_kwargs must reach the constructed client.

    Parametrized over headers absent (None), present, and an empty dict.
    """
    config = BaseEmbedderConfig(
        model="text-embedding-ada-002",
        azure_kwargs={
            "api_key": "test",
            "api_version": "test_version",
            "azure_endpoint": "test_endpoint",
            # Bug fix: key was misspelled "azuer_deployment", which silently
            # left the deployment unset on the client.
            "azure_deployment": "test_deployment",
            "default_headers": default_headers,
        },
    )
    embedder = AzureOpenAIEmbedding(config)
    assert embedder.client.api_key == "test"
    assert embedder.client._api_version == "test_version"
    assert embedder.client.default_headers.get("Test") == expected_header
@pytest.fixture
def base_embedder_config():
    """A minimal config double whose azure_kwargs attributes are all unset."""
    class _AzureKwargs:
        api_key = None
        azure_deployment = None
        azure_endpoint = None
        api_version = None
        default_headers = None
    class _Config(BaseEmbedderConfig):
        azure_kwargs = _AzureKwargs()
        http_client = None
        model = "test-model"
    return _Config()
def test_init_with_api_key(monkeypatch, base_embedder_config):
    """Explicit azure_kwargs values are passed straight through to AzureOpenAI."""
    kwargs = base_embedder_config.azure_kwargs
    kwargs.api_key = "test-key"
    kwargs.azure_deployment = "test-deployment"
    kwargs.azure_endpoint = "https://test.endpoint"
    kwargs.api_version = "2024-01-01"
    kwargs.default_headers = {"X-Test": "Header"}
    with (
        patch("mem0.embeddings.azure_openai.AzureOpenAI") as mock_azure_openai,
        patch("mem0.embeddings.azure_openai.DefaultAzureCredential") as mock_cred,
        patch("mem0.embeddings.azure_openai.get_bearer_token_provider") as mock_token_provider,
    ):
        AzureOpenAIEmbedding(base_embedder_config)
        mock_azure_openai.assert_called_once_with(
            azure_deployment="test-deployment",
            azure_endpoint="https://test.endpoint",
            azure_ad_token_provider=None,
            api_version="2024-01-01",
            api_key="test-key",
            http_client=None,
            default_headers={"X-Test": "Header"},
        )
        # An explicit key means the Azure AD token flow must not run at all.
        mock_cred.assert_not_called()
        mock_token_provider.assert_not_called()
def test_init_with_env_vars(monkeypatch, base_embedder_config):
    """EMBEDDING_AZURE_* environment variables fill in missing azure_kwargs."""
    env = {
        "EMBEDDING_AZURE_OPENAI_API_KEY": "env-key",
        "EMBEDDING_AZURE_DEPLOYMENT": "env-deployment",
        "EMBEDDING_AZURE_ENDPOINT": "https://env.endpoint",
        "EMBEDDING_AZURE_API_VERSION": "2024-02-02",
    }
    for name, value in env.items():
        monkeypatch.setenv(name, value)
    with patch("mem0.embeddings.azure_openai.AzureOpenAI") as mock_azure_openai:
        AzureOpenAIEmbedding(base_embedder_config)
        mock_azure_openai.assert_called_once_with(
            azure_deployment="env-deployment",
            azure_endpoint="https://env.endpoint",
            azure_ad_token_provider=None,
            api_version="2024-02-02",
            api_key="env-key",
            http_client=None,
            default_headers=None,
        )
def test_init_with_default_azure_credential(monkeypatch, base_embedder_config):
    """An empty api_key triggers the DefaultAzureCredential token flow."""
    base_embedder_config.azure_kwargs.api_key = ""
    with (
        patch("mem0.embeddings.azure_openai.DefaultAzureCredential") as mock_cred,
        patch("mem0.embeddings.azure_openai.get_bearer_token_provider") as mock_token_provider,
        patch("mem0.embeddings.azure_openai.AzureOpenAI") as mock_azure_openai,
    ):
        credential = Mock()
        token_provider = Mock()
        mock_cred.return_value = credential
        mock_token_provider.return_value = token_provider
        AzureOpenAIEmbedding(base_embedder_config)
        mock_cred.assert_called_once()
        mock_token_provider.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default")
        mock_azure_openai.assert_called_once_with(
            azure_deployment=None,
            azure_endpoint=None,
            azure_ad_token_provider=token_provider,
            api_version=None,
            api_key=None,
            http_client=None,
            default_headers=None,
        )
def test_init_with_placeholder_api_key(monkeypatch, base_embedder_config):
    """The literal placeholder key is treated as absent, triggering AAD auth."""
    base_embedder_config.azure_kwargs.api_key = "your-api-key"
    with (
        patch("mem0.embeddings.azure_openai.DefaultAzureCredential") as cred_cls,
        patch("mem0.embeddings.azure_openai.get_bearer_token_provider") as provider_fn,
        patch("mem0.embeddings.azure_openai.AzureOpenAI") as client_cls,
    ):
        credential = cred_cls.return_value = Mock()
        token_provider = provider_fn.return_value = Mock()
        AzureOpenAIEmbedding(base_embedder_config)
    cred_cls.assert_called_once()
    provider_fn.assert_called_once_with(credential, "https://cognitiveservices.azure.com/.default")
    client_cls.assert_called_once_with(
        azure_deployment=None,
        azure_endpoint=None,
        azure_ad_token_provider=token_provider,
        api_version=None,
        api_key=None,
        http_client=None,
        default_headers=None,
    )
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/main.py | openmemory/api/main.py | import datetime
from uuid import uuid4
from app.config import DEFAULT_APP_ID, USER_ID
from app.database import Base, SessionLocal, engine
from app.mcp_server import setup_mcp_server
from app.models import App, User
from app.routers import apps_router, backup_router, config_router, memories_router, stats_router
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi_pagination import add_pagination
# FastAPI application object; routers and the MCP server are attached below.
app = FastAPI(title="OpenMemory API")

# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — confirm this is intended outside local development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create all tables
Base.metadata.create_all(bind=engine)
# Check for USER_ID and create default user if needed
def create_default_user():
    """Ensure a row exists for the configured USER_ID, creating one if absent."""
    session = SessionLocal()
    try:
        existing = session.query(User).filter(User.user_id == USER_ID).first()
        if existing is not None:
            return
        session.add(
            User(
                id=uuid4(),
                user_id=USER_ID,
                name="Default User",
                created_at=datetime.datetime.now(datetime.UTC),
            )
        )
        session.commit()
    finally:
        session.close()
def create_default_app():
    """Ensure the default app row exists for the default user (no-op otherwise)."""
    session = SessionLocal()
    try:
        owner = session.query(User).filter(User.user_id == USER_ID).first()
        if owner is None:
            # Nothing to attach the app to yet.
            return
        duplicate = (
            session.query(App)
            .filter(App.name == DEFAULT_APP_ID, App.owner_id == owner.id)
            .first()
        )
        if duplicate:
            return
        session.add(
            App(
                id=uuid4(),
                name=DEFAULT_APP_ID,
                owner_id=owner.id,
                created_at=datetime.datetime.now(datetime.UTC),
                updated_at=datetime.datetime.now(datetime.UTC),
            )
        )
        session.commit()
    finally:
        session.close()
# Create default user on startup
# NOTE: these run at import time, before the ASGI server starts serving.
create_default_user()
create_default_app()

# Setup MCP server
setup_mcp_server(app)

# Include routers
app.include_router(memories_router)
app.include_router(apps_router)
app.include_router(stats_router)
app.include_router(config_router)
app.include_router(backup_router)

# Add pagination support
add_pagination(app)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/schemas.py | openmemory/api/app/schemas.py | from datetime import datetime
from typing import List, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, validator
class MemoryBase(BaseModel):
    """Fields shared by all memory payloads."""

    content: str
    # Trailing underscore mirrors the ORM attribute, where "metadata" is a
    # reserved name on SQLAlchemy declarative models.
    metadata_: Optional[dict] = Field(default_factory=dict)


class MemoryCreate(MemoryBase):
    """Input payload for creating a memory."""

    user_id: UUID
    app_id: UUID


class Category(BaseModel):
    """Minimal category representation (name only)."""

    name: str


class App(BaseModel):
    """Minimal app representation for embedding inside Memory."""

    id: UUID
    name: str


class Memory(MemoryBase):
    """Full memory read model; can be built directly from ORM instances."""

    id: UUID
    user_id: UUID
    app_id: UUID
    created_at: datetime
    updated_at: Optional[datetime] = None
    state: str
    categories: Optional[List[Category]] = None
    app: App

    # from_attributes lets pydantic read fields off SQLAlchemy objects.
    model_config = ConfigDict(from_attributes=True)


class MemoryUpdate(BaseModel):
    """Partial-update payload; every field is optional."""

    content: Optional[str] = None
    metadata_: Optional[dict] = None
    state: Optional[str] = None


class MemoryResponse(BaseModel):
    """API response shape for a single memory."""

    id: UUID
    content: str
    # Unix epoch seconds; datetimes are normalised by the validator below.
    created_at: int
    state: str
    app_id: UUID
    app_name: str
    categories: List[str]
    metadata_: Optional[dict] = None

    # NOTE(review): v1-style `validator` is deprecated alongside the v2
    # ConfigDict used above — consider `field_validator` when upgrading.
    @validator('created_at', pre=True)
    def convert_to_epoch(cls, v):
        """Accept a datetime and normalise it to epoch seconds; pass ints through."""
        if isinstance(v, datetime):
            return int(v.timestamp())
        return v


class PaginatedMemoryResponse(BaseModel):
    """Standard pagination envelope for memory listings."""

    items: List[MemoryResponse]
    total: int
    page: int
    size: int
    pages: int
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/models.py | openmemory/api/app/models.py | import datetime
import enum
import uuid
import sqlalchemy as sa
from app.database import Base
from app.utils.categorization import get_categories_for_memory
from sqlalchemy import (
JSON,
UUID,
Boolean,
Column,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
String,
Table,
event,
)
from sqlalchemy.orm import Session, relationship
def get_current_utc_time():
"""Get current UTC time"""
return datetime.datetime.now(datetime.UTC)
class MemoryState(enum.Enum):
    """Lifecycle states a memory can be in (soft-delete model)."""
    active = "active"
    paused = "paused"
    archived = "archived"
    deleted = "deleted"


class User(Base):
    """An OpenMemory end user; owns apps and memories."""
    __tablename__ = "users"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    # External handle (set from the USER env var elsewhere); unique lookup key.
    user_id = Column(String, nullable=False, unique=True, index=True)
    name = Column(String, nullable=True, index=True)
    email = Column(String, unique=True, nullable=True, index=True)
    # Stored in a column literally named "metadata"; the attribute carries a
    # trailing underscore because "metadata" is reserved on declarative models.
    metadata_ = Column('metadata', JSON, default=dict)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)

    apps = relationship("App", back_populates="owner")
    memories = relationship("Memory", back_populates="user")


class App(Base):
    """A client application that reads/writes memories on behalf of a user."""
    __tablename__ = "apps"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    owner_id = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    name = Column(String, nullable=False, index=True)
    description = Column(String)
    metadata_ = Column('metadata', JSON, default=dict)
    # Paused (inactive) apps are rejected by the MCP add_memories tool.
    is_active = Column(Boolean, default=True, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)

    owner = relationship("User", back_populates="apps")
    memories = relationship("Memory", back_populates="app")

    # App names are unique per owner, not globally.
    __table_args__ = (
        sa.UniqueConstraint('owner_id', 'name', name='idx_app_owner_name'),
    )


class Config(Base):
    """Key/value store for application configuration."""
    __tablename__ = "configs"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    key = Column(String, unique=True, nullable=False, index=True)
    value = Column(JSON, nullable=False)
    created_at = Column(DateTime, default=get_current_utc_time)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)


class Memory(Base):
    """A single stored memory with soft-delete/archive lifecycle timestamps."""
    __tablename__ = "memories"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    user_id = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    app_id = Column(UUID, ForeignKey("apps.id"), nullable=False, index=True)
    content = Column(String, nullable=False)
    # Presumably a serialized embedding — not written anywhere in this file;
    # TODO confirm against the code that populates it.
    vector = Column(String)
    metadata_ = Column('metadata', JSON, default=dict)
    state = Column(Enum(MemoryState), default=MemoryState.active, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)
    archived_at = Column(DateTime, nullable=True, index=True)
    deleted_at = Column(DateTime, nullable=True, index=True)

    user = relationship("User", back_populates="memories")
    app = relationship("App", back_populates="memories")
    categories = relationship("Category", secondary="memory_categories", back_populates="memories")

    # Composite indexes for the common state/app/user filter combinations.
    __table_args__ = (
        Index('idx_memory_user_state', 'user_id', 'state'),
        Index('idx_memory_app_state', 'app_id', 'state'),
        Index('idx_memory_user_app', 'user_id', 'app_id'),
    )
class Category(Base):
    """A label attached to memories (many-to-many via memory_categories)."""
    __tablename__ = "categories"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    name = Column(String, unique=True, nullable=False, index=True)
    description = Column(String)
    # Fix: pass the callable (evaluated per insert) instead of a datetime
    # computed once at import time — the old form stamped every Category row
    # with the process-start time. Also consistent with every sibling model.
    created_at = Column(DateTime, default=get_current_utc_time, index=True)
    updated_at = Column(DateTime,
                        default=get_current_utc_time,
                        onupdate=get_current_utc_time)

    memories = relationship("Memory", secondary="memory_categories", back_populates="categories")
# Join table implementing the Memory <-> Category many-to-many relation.
memory_categories = Table(
    "memory_categories", Base.metadata,
    Column("memory_id", UUID, ForeignKey("memories.id"), primary_key=True, index=True),
    Column("category_id", UUID, ForeignKey("categories.id"), primary_key=True, index=True),
    Index('idx_memory_category', 'memory_id', 'category_id')
)


class AccessControl(Base):
    """Access rule between a subject and an object.

    NOTE(review): subject/object ids are nullable — presumably a NULL id acts
    as a wildcard for that side; confirm against the permissions utilities.
    """
    __tablename__ = "access_controls"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    subject_type = Column(String, nullable=False, index=True)
    subject_id = Column(UUID, nullable=True, index=True)
    object_type = Column(String, nullable=False, index=True)
    object_id = Column(UUID, nullable=True, index=True)
    # Rule effect string — exact values (e.g. "allow"/"deny") are defined by
    # the permissions layer, not visible here.
    effect = Column(String, nullable=False, index=True)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)

    __table_args__ = (
        Index('idx_access_subject', 'subject_type', 'subject_id'),
        Index('idx_access_object', 'object_type', 'object_id'),
    )


class ArchivePolicy(Base):
    """Auto-archive rule: archive matching memories after N days."""
    __tablename__ = "archive_policies"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    criteria_type = Column(String, nullable=False, index=True)
    criteria_id = Column(UUID, nullable=True, index=True)
    days_to_archive = Column(Integer, nullable=False)
    created_at = Column(DateTime, default=get_current_utc_time, index=True)

    __table_args__ = (
        Index('idx_policy_criteria', 'criteria_type', 'criteria_id'),
    )


class MemoryStatusHistory(Base):
    """Audit trail of memory state transitions and who made them."""
    __tablename__ = "memory_status_history"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    memory_id = Column(UUID, ForeignKey("memories.id"), nullable=False, index=True)
    changed_by = Column(UUID, ForeignKey("users.id"), nullable=False, index=True)
    old_state = Column(Enum(MemoryState), nullable=False, index=True)
    new_state = Column(Enum(MemoryState), nullable=False, index=True)
    changed_at = Column(DateTime, default=get_current_utc_time, index=True)

    __table_args__ = (
        Index('idx_history_memory_state', 'memory_id', 'new_state'),
        Index('idx_history_user_time', 'changed_by', 'changed_at'),
    )


class MemoryAccessLog(Base):
    """Per-access audit record (search/list/delete) for a memory and app."""
    __tablename__ = "memory_access_logs"
    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
    memory_id = Column(UUID, ForeignKey("memories.id"), nullable=False, index=True)
    app_id = Column(UUID, ForeignKey("apps.id"), nullable=False, index=True)
    accessed_at = Column(DateTime, default=get_current_utc_time, index=True)
    access_type = Column(String, nullable=False, index=True)
    metadata_ = Column('metadata', JSON, default=dict)

    __table_args__ = (
        Index('idx_access_memory_time', 'memory_id', 'accessed_at'),
        Index('idx_access_app_time', 'app_id', 'accessed_at'),
    )
def categorize_memory(memory: Memory, db: Session) -> None:
    """Categorize a memory using OpenAI and store the categories in the database.

    Creates any missing Category rows and the memory_categories links, then
    commits. On any failure it rolls back and reports the error without
    raising, so categorization never aborts the surrounding operation.
    """
    try:
        # Get categories from OpenAI
        categories = get_categories_for_memory(memory.content)

        # Get or create categories in the database
        for category_name in categories:
            category = db.query(Category).filter(Category.name == category_name).first()
            if not category:
                category = Category(
                    name=category_name,
                    description=f"Automatically created category for {category_name}"
                )
                db.add(category)
                db.flush()  # Flush to get the category ID

            # Check if the memory-category association already exists
            existing = db.execute(
                memory_categories.select().where(
                    (memory_categories.c.memory_id == memory.id) &
                    (memory_categories.c.category_id == category.id)
                )
            ).first()

            if not existing:
                # Create the association
                db.execute(
                    memory_categories.insert().values(
                        memory_id=memory.id,
                        category_id=category.id
                    )
                )

        db.commit()
    except Exception as e:
        db.rollback()
        # NOTE(review): errors go to stdout only; consider the logging module.
        print(f"Error categorizing memory: {e}")
@event.listens_for(Memory, 'after_insert')
def after_memory_insert(mapper, connection, target):
    """Trigger categorization after a memory is inserted."""
    # NOTE(review): opens a second Session bound to the in-flight connection
    # and commits inside an after_insert hook — confirm this interacts safely
    # with the outer transaction before relying on it.
    db = Session(bind=connection)
    categorize_memory(target, db)
    db.close()


@event.listens_for(Memory, 'after_update')
def after_memory_update(mapper, connection, target):
    """Trigger categorization after a memory is updated."""
    db = Session(bind=connection)
    categorize_memory(target, db)
    db.close()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/database.py | openmemory/api/app/database.py | import os
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
# Load .env so DATABASE_URL can be supplied via the environment.
load_dotenv()

# Falls back to a local SQLite file when DATABASE_URL is unset, so the
# old `if not DATABASE_URL: raise` guard could never fire and was removed.
DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")

# "check_same_thread" is a SQLite-only connect argument; passing it to other
# drivers (e.g. PostgreSQL) raises, so only add it for sqlite URLs.
_connect_args = {"check_same_thread": False} if DATABASE_URL.startswith("sqlite") else {}

# SQLAlchemy engine & session factory
engine = create_engine(DATABASE_URL, connect_args=_connect_args)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Base class for models
Base = declarative_base()
# Dependency for FastAPI
def get_db():
    """Yield a database session, guaranteeing it is closed afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/config.py | openmemory/api/app/config.py | import os
# Defaults to the operating system's $USER login name when no override is
# set — NOTE(review): confirm this env-var indirection is intentional.
USER_ID = os.getenv("USER", "default_user")
DEFAULT_APP_ID = "openmemory" | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/__init__.py | openmemory/api/app/__init__.py | # This file makes the app directory a Python package | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/mcp_server.py | openmemory/api/app/mcp_server.py | """
MCP Server for OpenMemory with resilient memory client handling.
This module implements an MCP (Model Context Protocol) server that provides
memory operations for OpenMemory. The memory client is initialized lazily
to prevent server crashes when external dependencies (like Ollama) are
unavailable. If the memory client cannot be initialized, the server will
continue running with limited functionality and appropriate error messages.
Key features:
- Lazy memory client initialization
- Graceful error handling for unavailable dependencies
- Fallback to database-only mode when vector store is unavailable
- Proper logging for debugging connection issues
- Environment variable parsing for API keys
"""
import contextvars
import datetime
import json
import logging
import uuid
from app.database import SessionLocal
from app.models import Memory, MemoryAccessLog, MemoryState, MemoryStatusHistory
from app.utils.db import get_user_and_app
from app.utils.memory import get_memory_client
from app.utils.permissions import check_memory_access_permissions
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.routing import APIRouter
from mcp.server.fastmcp import FastMCP
from mcp.server.sse import SseServerTransport
# Load environment variables
load_dotenv()
# Initialize MCP
mcp = FastMCP("mem0-mcp-server")
# Don't initialize memory client at import time - do it lazily when needed
def get_memory_client_safe():
    """Return the memory client, or ``None`` when it cannot be initialized.

    Swallowing the failure here keeps the MCP server alive even when an
    external dependency (e.g. the vector store) is unreachable.
    """
    try:
        client = get_memory_client()
    except Exception as e:
        logging.warning(f"Failed to get memory client: {e}")
        return None
    return client
# Context variables for user_id and client_name
user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id")
client_name_var: contextvars.ContextVar[str] = contextvars.ContextVar("client_name")
# Create a router for MCP endpoints
mcp_router = APIRouter(prefix="/mcp")
# Initialize SSE transport
sse = SseServerTransport("/mcp/messages/")
@mcp.tool(description="Add a new memory. This method is called everytime the user informs anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something.")
async def add_memories(text: str) -> str:
    """Store ``text`` via the memory client and mirror the result in the DB.

    Returns the raw client response serialized as JSON, or an "Error: ..."
    string when context or infrastructure is missing.
    """
    # Per-connection identity injected by the SSE handler via contextvars.
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)

    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"

    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."

    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)

            # Check if app is active
            if not app.is_active:
                return f"Error: App {app.name} is currently paused on OpenMemory. Cannot create new memories."

            response = memory_client.add(text,
                                         user_id=uid,
                                         metadata={
                                             "source_app": "openmemory",
                                             "mcp_client": client_name,
                                         })

            # Process the response and update database
            if isinstance(response, dict) and 'results' in response:
                for result in response['results']:
                    memory_id = uuid.UUID(result['id'])
                    memory = db.query(Memory).filter(Memory.id == memory_id).first()

                    if result['event'] == 'ADD':
                        if not memory:
                            memory = Memory(
                                id=memory_id,
                                user_id=user.id,
                                app_id=app.id,
                                content=result['memory'],
                                state=MemoryState.active
                            )
                            db.add(memory)
                        else:
                            memory.state = MemoryState.active
                            memory.content = result['memory']

                        # Create history entry
                        # NOTE(review): `memory` is always set by this point
                        # (found or just created), so old_state is always
                        # `deleted` here — confirm that is the intended record.
                        history = MemoryStatusHistory(
                            memory_id=memory_id,
                            changed_by=user.id,
                            old_state=MemoryState.deleted if memory else None,
                            new_state=MemoryState.active
                        )
                        db.add(history)
                    elif result['event'] == 'DELETE':
                        if memory:
                            # Soft delete: flip state and stamp deleted_at.
                            memory.state = MemoryState.deleted
                            memory.deleted_at = datetime.datetime.now(datetime.UTC)
                            # Create history entry
                            history = MemoryStatusHistory(
                                memory_id=memory_id,
                                changed_by=user.id,
                                old_state=MemoryState.active,
                                new_state=MemoryState.deleted
                            )
                            db.add(history)

                db.commit()

            return json.dumps(response)
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error adding to memory: {e}")
        return f"Error adding to memory: {e}"
@mcp.tool(description="Search through stored memories. This method is called EVERYTIME the user asks anything.")
async def search_memory(query: str) -> str:
    """Vector-search the caller's memories, filtered by ACL, returning JSON.

    Returns ``{"results": [...]}`` serialized with indent=2, or an
    "Error: ..." string when context or infrastructure is missing.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"

    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."

    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)

            # Get accessible memory IDs based on ACL
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]

            filters = {
                "user_id": uid
            }
            embeddings = memory_client.embedding_model.embed(query, "search")
            hits = memory_client.vector_store.search(
                query=query,
                vectors=embeddings,
                limit=10,
                filters=filters,
            )

            # Allow-list of accessible ids as strings; None preserves the
            # original semantics of "no ACL filtering when nothing is accessible".
            allowed = set(str(mid) for mid in accessible_memory_ids) if accessible_memory_ids else None

            results = []
            for h in hits:
                # All vector db search functions return OutputData class
                id, score, payload = h.id, h.score, h.payload
                # BUG FIX: the original condition
                #     `if allowed and h.id is None or h.id not in allowed:`
                # parsed as `(allowed and h.id is None) or (h.id not in allowed)`
                # because `and` binds tighter than `or`, raising TypeError
                # ("not in None") whenever `allowed` was None. Parenthesize so
                # a hit is skipped only when an allow-list exists and the hit
                # is missing from it.
                if allowed is not None and (id is None or id not in allowed):
                    continue
                results.append({
                    "id": id,
                    "memory": payload.get("data"),
                    "hash": payload.get("hash"),
                    "created_at": payload.get("created_at"),
                    "updated_at": payload.get("updated_at"),
                    "score": score,
                })

            # Audit-log every hit actually returned to the caller.
            for r in results:
                if r.get("id"):
                    access_log = MemoryAccessLog(
                        memory_id=uuid.UUID(r["id"]),
                        app_id=app.id,
                        access_type="search",
                        metadata_={
                            "query": query,
                            "score": r.get("score"),
                            "hash": r.get("hash"),
                        },
                    )
                    db.add(access_log)
            db.commit()

            return json.dumps({"results": results}, indent=2)
        finally:
            db.close()
    except Exception as e:
        logging.exception(e)
        return f"Error searching memory: {e}"
@mcp.tool(description="List all memories in the user's memory")
async def list_memories() -> str:
    """Return every memory the calling app is permitted to see, as JSON."""
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"

    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."

    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)

            # Get all memories
            memories = memory_client.get_all(user_id=uid)
            filtered_memories = []

            # Filter memories based on permissions
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]
            # The client may return {"results": [...]} or a bare list of
            # dicts — both shapes are handled below.
            if isinstance(memories, dict) and 'results' in memories:
                for memory_data in memories['results']:
                    if 'id' in memory_data:
                        memory_id = uuid.UUID(memory_data['id'])
                        if memory_id in accessible_memory_ids:
                            # Create access log entry
                            access_log = MemoryAccessLog(
                                memory_id=memory_id,
                                app_id=app.id,
                                access_type="list",
                                metadata_={
                                    "hash": memory_data.get('hash')
                                }
                            )
                            db.add(access_log)
                            filtered_memories.append(memory_data)
                db.commit()
            else:
                for memory in memories:
                    memory_id = uuid.UUID(memory['id'])
                    memory_obj = db.query(Memory).filter(Memory.id == memory_id).first()
                    if memory_obj and check_memory_access_permissions(db, memory_obj, app.id):
                        # Create access log entry
                        access_log = MemoryAccessLog(
                            memory_id=memory_id,
                            app_id=app.id,
                            access_type="list",
                            metadata_={
                                "hash": memory.get('hash')
                            }
                        )
                        db.add(access_log)
                        filtered_memories.append(memory)
                db.commit()
            return json.dumps(filtered_memories, indent=2)
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error getting memories: {e}")
        return f"Error getting memories: {e}"
@mcp.tool(description="Delete specific memories by their IDs")
async def delete_memories(memory_ids: list[str]) -> str:
    """Soft-delete the requested memories the caller may access.

    Removes them from the vector store (best effort), marks the DB rows
    deleted, and writes history/access-log entries.
    """
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"

    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."

    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)

            # Convert string IDs to UUIDs and filter accessible ones
            requested_ids = [uuid.UUID(mid) for mid in memory_ids]
            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]

            # Only delete memories that are both requested and accessible
            # (list membership is O(n) per lookup — fine at current scales).
            ids_to_delete = [mid for mid in requested_ids if mid in accessible_memory_ids]

            if not ids_to_delete:
                return "Error: No accessible memories found with provided IDs"

            # Delete from vector store; a vector-store failure is logged but
            # does not abort the database soft-delete below.
            for memory_id in ids_to_delete:
                try:
                    memory_client.delete(str(memory_id))
                except Exception as delete_error:
                    logging.warning(f"Failed to delete memory {memory_id} from vector store: {delete_error}")

            # Update each memory's state and create history entries
            now = datetime.datetime.now(datetime.UTC)
            for memory_id in ids_to_delete:
                memory = db.query(Memory).filter(Memory.id == memory_id).first()
                if memory:
                    # Update memory state
                    memory.state = MemoryState.deleted
                    memory.deleted_at = now

                    # Create history entry
                    history = MemoryStatusHistory(
                        memory_id=memory_id,
                        changed_by=user.id,
                        old_state=MemoryState.active,
                        new_state=MemoryState.deleted
                    )
                    db.add(history)

                    # Create access log entry
                    access_log = MemoryAccessLog(
                        memory_id=memory_id,
                        app_id=app.id,
                        access_type="delete",
                        metadata_={"operation": "delete_by_id"}
                    )
                    db.add(access_log)

            db.commit()
            return f"Successfully deleted {len(ids_to_delete)} memories"
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error deleting memories: {e}")
        return f"Error deleting memories: {e}"
@mcp.tool(description="Delete all memories in the user's memory")
async def delete_all_memories() -> str:
    """Soft-delete every memory the calling app can access for this user."""
    uid = user_id_var.get(None)
    client_name = client_name_var.get(None)
    if not uid:
        return "Error: user_id not provided"
    if not client_name:
        return "Error: client_name not provided"

    # Get memory client safely
    memory_client = get_memory_client_safe()
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."

    try:
        db = SessionLocal()
        try:
            # Get or create user and app
            user, app = get_user_and_app(db, user_id=uid, app_id=client_name)

            user_memories = db.query(Memory).filter(Memory.user_id == user.id).all()
            accessible_memory_ids = [memory.id for memory in user_memories if check_memory_access_permissions(db, memory, app.id)]

            # delete the accessible memories only; vector-store failures are
            # logged but do not abort the database soft-delete below.
            for memory_id in accessible_memory_ids:
                try:
                    memory_client.delete(str(memory_id))
                except Exception as delete_error:
                    logging.warning(f"Failed to delete memory {memory_id} from vector store: {delete_error}")

            # Update each memory's state and create history entries
            now = datetime.datetime.now(datetime.UTC)
            for memory_id in accessible_memory_ids:
                # Rows are guaranteed to exist: ids came from user_memories above.
                memory = db.query(Memory).filter(Memory.id == memory_id).first()
                # Update memory state
                memory.state = MemoryState.deleted
                memory.deleted_at = now

                # Create history entry
                history = MemoryStatusHistory(
                    memory_id=memory_id,
                    changed_by=user.id,
                    old_state=MemoryState.active,
                    new_state=MemoryState.deleted
                )
                db.add(history)

                # Create access log entry
                access_log = MemoryAccessLog(
                    memory_id=memory_id,
                    app_id=app.id,
                    access_type="delete_all",
                    metadata_={"operation": "bulk_delete"}
                )
                db.add(access_log)

            db.commit()
            return "Successfully deleted all memories"
        finally:
            db.close()
    except Exception as e:
        logging.exception(f"Error deleting memories: {e}")
        return f"Error deleting memories: {e}"
@mcp_router.get("/{client_name}/sse/{user_id}")
async def handle_sse(request: Request):
    """Handle SSE connections for a specific user and client"""
    # Extract user_id and client_name from path parameters and stash them in
    # context variables so the MCP tools above can read them per-connection.
    uid = request.path_params.get("user_id")
    user_token = user_id_var.set(uid or "")
    client_name = request.path_params.get("client_name")
    client_token = client_name_var.set(client_name or "")

    try:
        # Handle SSE connection
        async with sse.connect_sse(
            request.scope,
            request.receive,
            # NOTE(review): _send is a private Starlette attribute — may
            # break across framework upgrades.
            request._send,
        ) as (read_stream, write_stream):
            # Blocks for the lifetime of the SSE connection.
            await mcp._mcp_server.run(
                read_stream,
                write_stream,
                mcp._mcp_server.create_initialization_options(),
            )
    finally:
        # Clean up context variables
        user_id_var.reset(user_token)
        client_name_var.reset(client_token)
@mcp_router.post("/messages/")
async def handle_get_message(request: Request):
    # NOTE(review): despite the name, this is registered as a POST route; it
    # simply forwards to the generic message handler below.
    return await handle_post_message(request)


@mcp_router.post("/{client_name}/sse/{user_id}/messages/")
async def handle_post_message(request: Request):
    # NOTE(review): this route handler shares its name with the module-level
    # function defined just below. By the time it executes, the global name
    # already resolves to that later definition, so this call is delegation,
    # not infinite recursion — but the shadowing is fragile; consider
    # renaming one of them.
    return await handle_post_message(request)


async def handle_post_message(request: Request):
    """Handle POST messages for SSE"""
    try:
        body = await request.body()

        # Create a simple receive function that returns the body
        async def receive():
            return {"type": "http.request", "body": body, "more_body": False}

        # Create a simple send function that does nothing
        async def send(message):
            return {}

        # Call handle_post_message with the correct arguments
        await sse.handle_post_message(request.scope, receive, send)

        # Return a success response
        return {"status": "ok"}
    finally:
        # NOTE(review): no cleanup is performed; this try/finally is inert.
        pass
def setup_mcp_server(app: FastAPI):
    """Setup MCP server with the FastAPI application"""
    # Name reported to MCP clients during initialization.
    mcp._mcp_server.name = "mem0-mcp-server"

    # Include MCP router in the FastAPI app
    app.include_router(mcp_router)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.